#include "radv_private.h"
#include "radv_radeon_winsys.h"
+#include "radv_shader.h"
#include "radv_cs.h"
#include "sid.h"
#include "gfx9d.h"
#include "vk_format.h"
+#include "radv_debug.h"
#include "radv_meta.h"
#include "ac_debug.h"
},
};
-void
-radv_dynamic_state_copy(struct radv_dynamic_state *dest,
- const struct radv_dynamic_state *src,
- uint32_t copy_mask)
+static void
+radv_bind_dynamic_state(struct radv_cmd_buffer *cmd_buffer,
+ const struct radv_dynamic_state *src)
{
+ struct radv_dynamic_state *dest = &cmd_buffer->state.dynamic;
+ uint32_t copy_mask = src->mask;
+ uint32_t dest_mask = 0;
+
+ /* Make sure to copy the number of viewports/scissors because they can
+ * only be specified at pipeline creation time.
+ */
+ dest->viewport.count = src->viewport.count;
+ dest->scissor.count = src->scissor.count;
+
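+	/* Only set dirty bits for states whose value actually changed, so
+	 * that unchanged registers are not re-emitted. */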
if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
- dest->viewport.count = src->viewport.count;
- typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
- src->viewport.count);
+ if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
+ src->viewport.count * sizeof(VkViewport))) {
+ typed_memcpy(dest->viewport.viewports,
+ src->viewport.viewports,
+ src->viewport.count);
+ dest_mask |= 1 << VK_DYNAMIC_STATE_VIEWPORT;
+ }
}
if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
- dest->scissor.count = src->scissor.count;
- typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
- src->scissor.count);
+ if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
+ src->scissor.count * sizeof(VkRect2D))) {
+ typed_memcpy(dest->scissor.scissors,
+ src->scissor.scissors, src->scissor.count);
+ dest_mask |= 1 << VK_DYNAMIC_STATE_SCISSOR;
+ }
+ }
+
+ if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
+ if (dest->line_width != src->line_width) {
+ dest->line_width = src->line_width;
+ dest_mask |= 1 << VK_DYNAMIC_STATE_LINE_WIDTH;
+ }
}
- if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH))
- dest->line_width = src->line_width;
+ if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
+ if (memcmp(&dest->depth_bias, &src->depth_bias,
+ sizeof(src->depth_bias))) {
+ dest->depth_bias = src->depth_bias;
+ dest_mask |= 1 << VK_DYNAMIC_STATE_DEPTH_BIAS;
+ }
+ }
- if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))
- dest->depth_bias = src->depth_bias;
+ if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS)) {
+ if (memcmp(&dest->blend_constants, &src->blend_constants,
+ sizeof(src->blend_constants))) {
+ typed_memcpy(dest->blend_constants,
+ src->blend_constants, 4);
+ dest_mask |= 1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS;
+ }
+ }
- if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
- typed_memcpy(dest->blend_constants, src->blend_constants, 4);
+ if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
+ if (memcmp(&dest->depth_bounds, &src->depth_bounds,
+ sizeof(src->depth_bounds))) {
+ dest->depth_bounds = src->depth_bounds;
+ dest_mask |= 1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS;
+ }
+ }
- if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS))
- dest->depth_bounds = src->depth_bounds;
+ if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
+ if (memcmp(&dest->stencil_compare_mask,
+ &src->stencil_compare_mask,
+ sizeof(src->stencil_compare_mask))) {
+ dest->stencil_compare_mask = src->stencil_compare_mask;
+ dest_mask |= 1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK;
+ }
+ }
- if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))
- dest->stencil_compare_mask = src->stencil_compare_mask;
+ if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
+ if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
+ sizeof(src->stencil_write_mask))) {
+ dest->stencil_write_mask = src->stencil_write_mask;
+ dest_mask |= 1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK;
+ }
+ }
- if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))
- dest->stencil_write_mask = src->stencil_write_mask;
+ if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
+ if (memcmp(&dest->stencil_reference, &src->stencil_reference,
+ sizeof(src->stencil_reference))) {
+ dest->stencil_reference = src->stencil_reference;
+ dest_mask |= 1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE;
+ }
+ }
- if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
- dest->stencil_reference = src->stencil_reference;
+ cmd_buffer->state.dirty |= dest_mask;
}
bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer *cmd_buffer)
VkCommandBuffer* pCommandBuffer)
{
struct radv_cmd_buffer *cmd_buffer;
- VkResult result;
unsigned ring;
- cmd_buffer = vk_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (cmd_buffer == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- memset(cmd_buffer, 0, sizeof(*cmd_buffer));
cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
cmd_buffer->device = device;
cmd_buffer->pool = pool;
cmd_buffer->cs = device->ws->cs_create(device->ws, ring);
if (!cmd_buffer->cs) {
- result = VK_ERROR_OUT_OF_HOST_MEMORY;
- goto fail;
+ vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
*pCommandBuffer = radv_cmd_buffer_to_handle(cmd_buffer);
- cmd_buffer->upload.offset = 0;
- cmd_buffer->upload.size = 0;
list_inithead(&cmd_buffer->upload.list);
return VK_SUCCESS;
-
-fail:
- vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
-
- return result;
}
static void
vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
-static void radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
+static VkResult
+radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
{
cmd_buffer->device->ws->cs_reset(cmd_buffer->cs);
free(up);
}
+ cmd_buffer->push_constant_stages = 0;
cmd_buffer->scratch_size_needed = 0;
cmd_buffer->compute_scratch_size_needed = 0;
cmd_buffer->esgs_ring_size_needed = 0;
cmd_buffer->sample_positions_needed = false;
if (cmd_buffer->upload.upload_bo)
- cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs,
- cmd_buffer->upload.upload_bo, 8);
+ radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
+ cmd_buffer->upload.upload_bo, 8);
cmd_buffer->upload.offset = 0;
- cmd_buffer->record_fail = false;
+ cmd_buffer->record_result = VK_SUCCESS;
cmd_buffer->ring_offsets_idx = -1;
&fence_ptr);
cmd_buffer->gfx9_fence_bo = cmd_buffer->upload.upload_bo;
}
+
+ cmd_buffer->status = RADV_CMD_BUFFER_STATUS_INITIAL;
+
+ return cmd_buffer->record_result;
}
static bool
bo = device->ws->buffer_create(device->ws,
new_size, 4096,
RADEON_DOMAIN_GTT,
- RADEON_FLAG_CPU_ACCESS);
+ RADEON_FLAG_CPU_ACCESS|
+ RADEON_FLAG_NO_INTERPROCESS_SHARING);
if (!bo) {
- cmd_buffer->record_fail = true;
+ cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
return false;
}
- device->ws->cs_add_buffer(cmd_buffer->cs, bo, 8);
+ radv_cs_add_buffer(device->ws, cmd_buffer->cs, bo, 8);
if (cmd_buffer->upload.upload_bo) {
upload = malloc(sizeof(*upload));
if (!upload) {
- cmd_buffer->record_fail = true;
+ cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
device->ws->buffer_destroy(bo);
return false;
}
cmd_buffer->upload.map = device->ws->buffer_map(cmd_buffer->upload.upload_bo);
if (!cmd_buffer->upload.map) {
- cmd_buffer->record_fail = true;
+ cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
return false;
}
return true;
}
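+/* Emit a PKT3_WRITE_DATA packet: an asynchronous memory write of 'count'
+ * dwords at 'va' through the ME engine, with write confirmation.
+ */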
+static void
+radv_emit_write_data_packet(struct radeon_winsys_cs *cs, uint64_t va,
+ unsigned count, const uint32_t *data)
+{
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0));
+ radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
+ S_370_WR_CONFIRM(1) |
+ S_370_ENGINE_SEL(V_370_ME));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit_array(cs, data, count);
+}
+
void radv_cmd_buffer_trace_emit(struct radv_cmd_buffer *cmd_buffer)
{
struct radv_device *device = cmd_buffer->device;
struct radeon_winsys_cs *cs = cmd_buffer->cs;
uint64_t va;
- if (!device->trace_bo)
- return;
-
- va = device->ws->buffer_get_va(device->trace_bo);
+ va = radv_buffer_get_va(device->trace_bo);
+ if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
+ va += 4;
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 7);
++cmd_buffer->state.trace_id;
- device->ws->cs_add_buffer(cs, device->trace_bo, 8);
- radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
- radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
- S_370_WR_CONFIRM(1) |
- S_370_ENGINE_SEL(V_370_ME));
- radeon_emit(cs, va);
- radeon_emit(cs, va >> 32);
- radeon_emit(cs, cmd_buffer->state.trace_id);
+ radv_cs_add_buffer(device->ws, cs, device->trace_bo, 8);
+ radv_emit_write_data_packet(cs, va, 1, &cmd_buffer->state.trace_id);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, AC_ENCODE_TRACE_POINT(cmd_buffer->state.trace_id));
}
+static void
+radv_cmd_buffer_after_draw(struct radv_cmd_buffer *cmd_buffer)
+{
+ if (cmd_buffer->device->instance->debug_flags & RADV_DEBUG_SYNC_SHADERS) {
+ enum radv_cmd_flush_bits flags;
+
+ /* Force wait for graphics/compute engines to be idle. */
+ flags = RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
+ RADV_CMD_FLAG_CS_PARTIAL_FLUSH;
+
+ si_cs_emit_cache_flush(cmd_buffer->cs, false,
+ cmd_buffer->device->physical_device->rad_info.chip_class,
+ NULL, 0,
+ radv_cmd_buffer_uses_mec(cmd_buffer),
+ flags);
+ }
+
+ if (unlikely(cmd_buffer->device->trace_bo))
+ radv_cmd_buffer_trace_emit(cmd_buffer);
+}
+
+static void
+radv_save_pipeline(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_pipeline *pipeline, enum ring_type ring)
+{
+ struct radv_device *device = cmd_buffer->device;
+ struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ uint32_t data[2];
+ uint64_t va;
+
+ va = radv_buffer_get_va(device->trace_bo);
+
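+	/* Trace BO layout as written by this file: dwords 0-1 hold the
+	 * primary/secondary trace IDs, dwords 2-3 the last GFX pipeline,
+	 * dwords 4-5 the last compute pipeline, and dwords 6+ the bound
+	 * descriptor sets.
+	 */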
+ switch (ring) {
+ case RING_GFX:
+ va += 8;
+ break;
+ case RING_COMPUTE:
+ va += 16;
+ break;
+ default:
+ assert(!"invalid ring type");
+ }
+
+ MAYBE_UNUSED unsigned cdw_max = radeon_check_space(device->ws,
+ cmd_buffer->cs, 6);
+
+ data[0] = (uintptr_t)pipeline;
+ data[1] = (uintptr_t)pipeline >> 32;
+
+ radv_cs_add_buffer(device->ws, cs, device->trace_bo, 8);
+ radv_emit_write_data_packet(cs, va, 2, data);
+}
+
+void radv_set_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_descriptor_set *set,
+ unsigned idx)
+{
+ cmd_buffer->descriptors[idx] = set;
+ if (set)
+ cmd_buffer->state.valid_descriptors |= (1u << idx);
+ else
+ cmd_buffer->state.valid_descriptors &= ~(1u << idx);
+ cmd_buffer->state.descriptors_dirty |= (1u << idx);
+}
+
+static void
+radv_save_descriptors(struct radv_cmd_buffer *cmd_buffer)
+{
+ struct radv_device *device = cmd_buffer->device;
+ struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ uint32_t data[MAX_SETS * 2] = {};
+ uint64_t va;
+ unsigned i;
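+	/* Descriptor set pointers are saved after the trace IDs and pipeline
+	 * pointers, two dwords per set. */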
+ va = radv_buffer_get_va(device->trace_bo) + 24;
+
+ MAYBE_UNUSED unsigned cdw_max = radeon_check_space(device->ws,
+ cmd_buffer->cs, 4 + MAX_SETS * 2);
+
+ for_each_bit(i, cmd_buffer->state.valid_descriptors) {
+ struct radv_descriptor_set *set = cmd_buffer->descriptors[i];
+ data[i * 2] = (uintptr_t)set;
+ data[i * 2 + 1] = (uintptr_t)set >> 32;
+ }
+
+ radv_cs_add_buffer(device->ws, cs, device->trace_bo, 8);
+ radv_emit_write_data_packet(cs, va, MAX_SETS * 2, data);
+}
+
static void
radv_emit_graphics_blend_state(struct radv_cmd_buffer *cmd_buffer,
struct radv_pipeline *pipeline)
radeon_set_context_reg(cmd_buffer->cs, R_028B70_DB_ALPHA_TO_MASK, pipeline->graphics.blend.db_alpha_to_mask);
if (cmd_buffer->device->physical_device->has_rbplus) {
+
+ radeon_set_context_reg_seq(cmd_buffer->cs, R_028760_SX_MRT0_BLEND_OPT, 8);
+ radeon_emit_array(cmd_buffer->cs, pipeline->graphics.blend.sx_mrt_blend_opt, 8);
+
radeon_set_context_reg_seq(cmd_buffer->cs, R_028754_SX_PS_DOWNCONVERT, 3);
radeon_emit(cmd_buffer->cs, 0); /* R_028754_SX_PS_DOWNCONVERT */
radeon_emit(cmd_buffer->cs, 0); /* R_028758_SX_BLEND_OPT_EPSILON */
radeon_set_context_reg(cmd_buffer->cs, R_028010_DB_RENDER_OVERRIDE2, ds->db_render_override2);
}
-/* 12.4 fixed-point */
-static unsigned radv_pack_float_12p4(float x)
-{
- return x <= 0 ? 0 :
- x >= 4096 ? 0xffff : x * 16;
-}
-
-uint32_t
-radv_shader_stage_to_user_data_0(gl_shader_stage stage, bool has_gs, bool has_tess)
-{
- switch (stage) {
- case MESA_SHADER_FRAGMENT:
- return R_00B030_SPI_SHADER_USER_DATA_PS_0;
- case MESA_SHADER_VERTEX:
- if (has_tess)
- return R_00B530_SPI_SHADER_USER_DATA_LS_0;
- else
- return has_gs ? R_00B330_SPI_SHADER_USER_DATA_ES_0 : R_00B130_SPI_SHADER_USER_DATA_VS_0;
- case MESA_SHADER_GEOMETRY:
- return R_00B230_SPI_SHADER_USER_DATA_GS_0;
- case MESA_SHADER_COMPUTE:
- return R_00B900_COMPUTE_USER_DATA_0;
- case MESA_SHADER_TESS_CTRL:
- return R_00B430_SPI_SHADER_USER_DATA_HS_0;
- case MESA_SHADER_TESS_EVAL:
- if (has_gs)
- return R_00B330_SPI_SHADER_USER_DATA_ES_0;
- else
- return R_00B130_SPI_SHADER_USER_DATA_VS_0;
- default:
- unreachable("unknown shader");
- }
-}
-
struct ac_userdata_info *
radv_lookup_user_sgpr(struct radv_pipeline *pipeline,
gl_shader_stage stage,
int idx)
{
+ if (stage == MESA_SHADER_VERTEX) {
+ if (pipeline->shaders[MESA_SHADER_VERTEX])
+ return &pipeline->shaders[MESA_SHADER_VERTEX]->info.user_sgprs_locs.shader_data[idx];
+ if (pipeline->shaders[MESA_SHADER_TESS_CTRL])
+ return &pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.user_sgprs_locs.shader_data[idx];
+ if (pipeline->shaders[MESA_SHADER_GEOMETRY])
+ return &pipeline->shaders[MESA_SHADER_GEOMETRY]->info.user_sgprs_locs.shader_data[idx];
+ } else if (stage == MESA_SHADER_TESS_EVAL) {
+ if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
+ return &pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.user_sgprs_locs.shader_data[idx];
+ if (pipeline->shaders[MESA_SHADER_GEOMETRY])
+ return &pipeline->shaders[MESA_SHADER_GEOMETRY]->info.user_sgprs_locs.shader_data[idx];
+ }
return &pipeline->shaders[stage]->info.user_sgprs_locs.shader_data[idx];
}
int idx, uint64_t va)
{
struct ac_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx);
- uint32_t base_reg = radv_shader_stage_to_user_data_0(stage, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
+ uint32_t base_reg = pipeline->user_data_0[stage];
if (loc->sgpr_idx == -1)
return;
assert(loc->num_sgprs == 2);
radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_mask[0]);
radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_mask[1]);
- radeon_set_context_reg(cmd_buffer->cs, CM_R_028804_DB_EQAA, ms->db_eqaa);
- radeon_set_context_reg(cmd_buffer->cs, EG_R_028A4C_PA_SC_MODE_CNTL_1, ms->pa_sc_mode_cntl_1);
+ radeon_set_context_reg(cmd_buffer->cs, R_028804_DB_EQAA, ms->db_eqaa);
+ radeon_set_context_reg(cmd_buffer->cs, R_028A4C_PA_SC_MODE_CNTL_1, ms->pa_sc_mode_cntl_1);
- if (old_pipeline && num_samples == old_pipeline->graphics.ms.num_samples)
+ if (old_pipeline && num_samples == old_pipeline->graphics.ms.num_samples &&
+ old_pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.needs_sample_positions == pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.needs_sample_positions)
return;
- radeon_set_context_reg_seq(cmd_buffer->cs, CM_R_028BDC_PA_SC_LINE_CNTL, 2);
+ radeon_set_context_reg_seq(cmd_buffer->cs, R_028BDC_PA_SC_LINE_CNTL, 2);
radeon_emit(cmd_buffer->cs, ms->pa_sc_line_cntl);
radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_config);
radv_cayman_emit_msaa_sample_locs(cmd_buffer->cs, num_samples);
/* GFX9: Flush DFSM when the AA mode changes. */
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
+ if (cmd_buffer->device->dfsm_allowed) {
radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
}
if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.needs_sample_positions) {
uint32_t offset;
struct ac_userdata_info *loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_FRAGMENT, AC_UD_PS_SAMPLE_POS_OFFSET);
- uint32_t base_reg = radv_shader_stage_to_user_data_0(MESA_SHADER_FRAGMENT, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
+ uint32_t base_reg = pipeline->user_data_0[MESA_SHADER_FRAGMENT];
if (loc->sgpr_idx == -1)
return;
assert(loc->num_sgprs == 1);
radeon_set_context_reg(cmd_buffer->cs, R_028810_PA_CL_CLIP_CNTL,
raster->pa_cl_clip_cntl);
-
radeon_set_context_reg(cmd_buffer->cs, R_0286D4_SPI_INTERP_CONTROL_0,
raster->spi_interp_control);
-
- radeon_set_context_reg_seq(cmd_buffer->cs, R_028A00_PA_SU_POINT_SIZE, 2);
- unsigned tmp = (unsigned)(1.0 * 8.0);
- radeon_emit(cmd_buffer->cs, S_028A00_HEIGHT(tmp) | S_028A00_WIDTH(tmp));
- radeon_emit(cmd_buffer->cs, S_028A04_MIN_SIZE(radv_pack_float_12p4(0)) |
- S_028A04_MAX_SIZE(radv_pack_float_12p4(8192/2))); /* R_028A04_PA_SU_POINT_MINMAX */
-
radeon_set_context_reg(cmd_buffer->cs, R_028BE4_PA_SU_VTX_CNTL,
raster->pa_su_vtx_cntl);
-
radeon_set_context_reg(cmd_buffer->cs, R_028814_PA_SU_SC_MODE_CNTL,
raster->pa_su_sc_mode_cntl);
}
static inline void
-radv_emit_prefetch(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
- unsigned size)
+radv_emit_prefetch_TC_L2_async(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
+ unsigned size)
{
if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK)
si_cp_dma_prefetch(cmd_buffer, va, size);
}
static void
-radv_emit_hw_vs(struct radv_cmd_buffer *cmd_buffer,
- struct radv_pipeline *pipeline,
- struct radv_shader_variant *shader,
- struct ac_vs_output_info *outinfo)
+radv_emit_VBO_descriptors_prefetch(struct radv_cmd_buffer *cmd_buffer)
+{
+ if (cmd_buffer->state.vb_prefetch_dirty) {
+ radv_emit_prefetch_TC_L2_async(cmd_buffer,
+ cmd_buffer->state.vb_va,
+ cmd_buffer->state.vb_size);
+ cmd_buffer->state.vb_prefetch_dirty = false;
+ }
+}
+
+static void
+radv_emit_shader_prefetch(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_shader_variant *shader)
{
struct radeon_winsys *ws = cmd_buffer->device->ws;
- uint64_t va = ws->buffer_get_va(shader->bo);
- unsigned export_count;
+ struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ uint64_t va;
+
+ if (!shader)
+ return;
+
+ va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
+
+ radv_cs_add_buffer(ws, cs, shader->bo, 8);
+ radv_emit_prefetch_TC_L2_async(cmd_buffer, va, shader->code_size);
+}
+
+static void
+radv_emit_prefetch(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_pipeline *pipeline)
+{
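+	/* Prefetch the shader binaries into the TC L2 cache in pipeline-stage
+	 * order, with the VBO descriptors right after the vertex shader. */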
+ radv_emit_shader_prefetch(cmd_buffer,
+ pipeline->shaders[MESA_SHADER_VERTEX]);
+ radv_emit_VBO_descriptors_prefetch(cmd_buffer);
+ radv_emit_shader_prefetch(cmd_buffer,
+ pipeline->shaders[MESA_SHADER_TESS_CTRL]);
+ radv_emit_shader_prefetch(cmd_buffer,
+ pipeline->shaders[MESA_SHADER_TESS_EVAL]);
+ radv_emit_shader_prefetch(cmd_buffer,
+ pipeline->shaders[MESA_SHADER_GEOMETRY]);
+ radv_emit_shader_prefetch(cmd_buffer, pipeline->gs_copy_shader);
+ radv_emit_shader_prefetch(cmd_buffer,
+ pipeline->shaders[MESA_SHADER_FRAGMENT]);
+}
- ws->cs_add_buffer(cmd_buffer->cs, shader->bo, 8);
- radv_emit_prefetch(cmd_buffer, va, shader->code_size);
+static void
+radv_emit_hw_vs(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_pipeline *pipeline,
+ struct radv_shader_variant *shader)
+{
+ uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
- export_count = MAX2(1, outinfo->param_exports);
radeon_set_context_reg(cmd_buffer->cs, R_0286C4_SPI_VS_OUT_CONFIG,
- S_0286C4_VS_EXPORT_COUNT(export_count - 1));
+ pipeline->graphics.vs.spi_vs_out_config);
radeon_set_context_reg(cmd_buffer->cs, R_02870C_SPI_SHADER_POS_FORMAT,
- S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
- S_02870C_POS1_EXPORT_FORMAT(outinfo->pos_exports > 1 ?
- V_02870C_SPI_SHADER_4COMP :
- V_02870C_SPI_SHADER_NONE) |
- S_02870C_POS2_EXPORT_FORMAT(outinfo->pos_exports > 2 ?
- V_02870C_SPI_SHADER_4COMP :
- V_02870C_SPI_SHADER_NONE) |
- S_02870C_POS3_EXPORT_FORMAT(outinfo->pos_exports > 3 ?
- V_02870C_SPI_SHADER_4COMP :
- V_02870C_SPI_SHADER_NONE));
-
+ pipeline->graphics.vs.spi_shader_pos_format);
radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B120_SPI_SHADER_PGM_LO_VS, 4);
radeon_emit(cmd_buffer->cs, va >> 8);
radeon_set_context_reg(cmd_buffer->cs, R_02881C_PA_CL_VS_OUT_CNTL,
- pipeline->graphics.pa_cl_vs_out_cntl);
+ pipeline->graphics.vs.pa_cl_vs_out_cntl);
if (cmd_buffer->device->physical_device->rad_info.chip_class <= VI)
radeon_set_context_reg(cmd_buffer->cs, R_028AB4_VGT_REUSE_OFF,
- S_028AB4_REUSE_OFF(outinfo->writes_viewport_index));
+ pipeline->graphics.vs.vgt_reuse_off);
}
static void
radv_emit_hw_es(struct radv_cmd_buffer *cmd_buffer,
- struct radv_shader_variant *shader,
- struct ac_es_output_info *outinfo)
+ struct radv_pipeline *pipeline,
+ struct radv_shader_variant *shader)
{
- struct radeon_winsys *ws = cmd_buffer->device->ws;
- uint64_t va = ws->buffer_get_va(shader->bo);
-
- ws->cs_add_buffer(cmd_buffer->cs, shader->bo, 8);
- radv_emit_prefetch(cmd_buffer, va, shader->code_size);
+ uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
- radeon_set_context_reg(cmd_buffer->cs, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
- outinfo->esgs_itemsize / 4);
radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B320_SPI_SHADER_PGM_LO_ES, 4);
radeon_emit(cmd_buffer->cs, va >> 8);
radeon_emit(cmd_buffer->cs, va >> 40);
radv_emit_hw_ls(struct radv_cmd_buffer *cmd_buffer,
struct radv_shader_variant *shader)
{
- struct radeon_winsys *ws = cmd_buffer->device->ws;
- uint64_t va = ws->buffer_get_va(shader->bo);
+ uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
uint32_t rsrc2 = shader->rsrc2;
- ws->cs_add_buffer(cmd_buffer->cs, shader->bo, 8);
- radv_emit_prefetch(cmd_buffer, va, shader->code_size);
-
radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B520_SPI_SHADER_PGM_LO_LS, 2);
radeon_emit(cmd_buffer->cs, va >> 8);
radeon_emit(cmd_buffer->cs, va >> 40);
radv_emit_hw_hs(struct radv_cmd_buffer *cmd_buffer,
struct radv_shader_variant *shader)
{
- struct radeon_winsys *ws = cmd_buffer->device->ws;
- uint64_t va = ws->buffer_get_va(shader->bo);
+ uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
- ws->cs_add_buffer(cmd_buffer->cs, shader->bo, 8);
- radv_emit_prefetch(cmd_buffer, va, shader->code_size);
-
- radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B420_SPI_SHADER_PGM_LO_HS, 4);
- radeon_emit(cmd_buffer->cs, va >> 8);
- radeon_emit(cmd_buffer->cs, va >> 40);
- radeon_emit(cmd_buffer->cs, shader->rsrc1);
- radeon_emit(cmd_buffer->cs, shader->rsrc2);
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
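+		/* On GFX9+, LS and HS are merged into a single stage; the
+		 * shader address is programmed through the LS registers. */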
+ radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B410_SPI_SHADER_PGM_LO_LS, 2);
+ radeon_emit(cmd_buffer->cs, va >> 8);
+ radeon_emit(cmd_buffer->cs, va >> 40);
+
+ radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B428_SPI_SHADER_PGM_RSRC1_HS, 2);
+ radeon_emit(cmd_buffer->cs, shader->rsrc1);
+ radeon_emit(cmd_buffer->cs, shader->rsrc2 |
+ S_00B42C_LDS_SIZE(cmd_buffer->state.pipeline->graphics.tess.lds_size));
+ } else {
+ radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B420_SPI_SHADER_PGM_LO_HS, 4);
+ radeon_emit(cmd_buffer->cs, va >> 8);
+ radeon_emit(cmd_buffer->cs, va >> 40);
+ radeon_emit(cmd_buffer->cs, shader->rsrc1);
+ radeon_emit(cmd_buffer->cs, shader->rsrc2);
+ }
}
static void
{
struct radv_shader_variant *vs;
- assert (pipeline->shaders[MESA_SHADER_VERTEX]);
+ radeon_set_context_reg(cmd_buffer->cs, R_028A84_VGT_PRIMITIVEID_EN, pipeline->graphics.vgt_primitiveid_en);
+ /* Skip shaders merged into HS/GS */
vs = pipeline->shaders[MESA_SHADER_VERTEX];
+ if (!vs)
+ return;
if (vs->info.vs.as_ls)
radv_emit_hw_ls(cmd_buffer, vs);
else if (vs->info.vs.as_es)
- radv_emit_hw_es(cmd_buffer, vs, &vs->info.vs.es_info);
+ radv_emit_hw_es(cmd_buffer, pipeline, vs);
else
- radv_emit_hw_vs(cmd_buffer, pipeline, vs, &vs->info.vs.outinfo);
-
- radeon_set_context_reg(cmd_buffer->cs, R_028A84_VGT_PRIMITIVEID_EN, pipeline->graphics.vgt_primitiveid_en);
+ radv_emit_hw_vs(cmd_buffer, pipeline, vs);
}
tcs = pipeline->shaders[MESA_SHADER_TESS_CTRL];
tes = pipeline->shaders[MESA_SHADER_TESS_EVAL];
- if (tes->info.tes.as_es)
- radv_emit_hw_es(cmd_buffer, tes, &tes->info.tes.es_info);
- else
- radv_emit_hw_vs(cmd_buffer, pipeline, tes, &tes->info.tes.outinfo);
+ if (tes) {
+ if (tes->info.tes.as_es)
+ radv_emit_hw_es(cmd_buffer, pipeline, tes);
+ else
+ radv_emit_hw_vs(cmd_buffer, pipeline, tes);
+ }
radv_emit_hw_hs(cmd_buffer, tcs);
loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_TESS_CTRL, AC_UD_TCS_OFFCHIP_LAYOUT);
if (loc->sgpr_idx != -1) {
- uint32_t base_reg = radv_shader_stage_to_user_data_0(MESA_SHADER_TESS_CTRL, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
+ uint32_t base_reg = pipeline->user_data_0[MESA_SHADER_TESS_CTRL];
assert(loc->num_sgprs == 4);
assert(!loc->indirect);
radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, 4);
loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_TESS_EVAL, AC_UD_TES_OFFCHIP_LAYOUT);
if (loc->sgpr_idx != -1) {
- uint32_t base_reg = radv_shader_stage_to_user_data_0(MESA_SHADER_TESS_EVAL, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
+ uint32_t base_reg = pipeline->user_data_0[MESA_SHADER_TESS_EVAL];
assert(loc->num_sgprs == 1);
assert(!loc->indirect);
loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_VERTEX, AC_UD_VS_LS_TCS_IN_LAYOUT);
if (loc->sgpr_idx != -1) {
- uint32_t base_reg = radv_shader_stage_to_user_data_0(MESA_SHADER_VERTEX, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
+ uint32_t base_reg = pipeline->user_data_0[MESA_SHADER_VERTEX];
assert(loc->num_sgprs == 1);
assert(!loc->indirect);
radv_emit_geometry_shader(struct radv_cmd_buffer *cmd_buffer,
struct radv_pipeline *pipeline)
{
- struct radeon_winsys *ws = cmd_buffer->device->ws;
struct radv_shader_variant *gs;
uint64_t va;
S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
S_028B90_ENABLE(gs_num_invocations > 0));
- va = ws->buffer_get_va(gs->bo);
- ws->cs_add_buffer(cmd_buffer->cs, gs->bo, 8);
- radv_emit_prefetch(cmd_buffer, va, gs->code_size);
+ radeon_set_context_reg(cmd_buffer->cs, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
+ pipeline->graphics.gs.vgt_esgs_ring_itemsize);
- radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B220_SPI_SHADER_PGM_LO_GS, 4);
- radeon_emit(cmd_buffer->cs, va >> 8);
- radeon_emit(cmd_buffer->cs, va >> 40);
- radeon_emit(cmd_buffer->cs, gs->rsrc1);
- radeon_emit(cmd_buffer->cs, gs->rsrc2);
+ va = radv_buffer_get_va(gs->bo) + gs->bo_offset;
+
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
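+		/* On GFX9+, ES and GS are merged; the GS address is
+		 * programmed through the ES registers. */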
+ radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B210_SPI_SHADER_PGM_LO_ES, 2);
+ radeon_emit(cmd_buffer->cs, va >> 8);
+ radeon_emit(cmd_buffer->cs, va >> 40);
+
+ radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B228_SPI_SHADER_PGM_RSRC1_GS, 2);
+ radeon_emit(cmd_buffer->cs, gs->rsrc1);
+ radeon_emit(cmd_buffer->cs, gs->rsrc2 |
+ S_00B22C_LDS_SIZE(pipeline->graphics.gs.lds_size));
- radv_emit_hw_vs(cmd_buffer, pipeline, pipeline->gs_copy_shader, &pipeline->gs_copy_shader->info.vs.outinfo);
+ radeon_set_context_reg(cmd_buffer->cs, R_028A44_VGT_GS_ONCHIP_CNTL, pipeline->graphics.gs.vgt_gs_onchip_cntl);
+ radeon_set_context_reg(cmd_buffer->cs, R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP, pipeline->graphics.gs.vgt_gs_max_prims_per_subgroup);
+ } else {
+ radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B220_SPI_SHADER_PGM_LO_GS, 4);
+ radeon_emit(cmd_buffer->cs, va >> 8);
+ radeon_emit(cmd_buffer->cs, va >> 40);
+ radeon_emit(cmd_buffer->cs, gs->rsrc1);
+ radeon_emit(cmd_buffer->cs, gs->rsrc2);
+ }
+
+ radv_emit_hw_vs(cmd_buffer, pipeline, pipeline->gs_copy_shader);
struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_GEOMETRY,
AC_UD_GS_VS_RING_STRIDE_ENTRIES);
radv_emit_fragment_shader(struct radv_cmd_buffer *cmd_buffer,
struct radv_pipeline *pipeline)
{
- struct radeon_winsys *ws = cmd_buffer->device->ws;
struct radv_shader_variant *ps;
uint64_t va;
unsigned spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1);
assert (pipeline->shaders[MESA_SHADER_FRAGMENT]);
ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
-
- va = ws->buffer_get_va(ps->bo);
- ws->cs_add_buffer(cmd_buffer->cs, ps->bo, 8);
- radv_emit_prefetch(cmd_buffer, va, ps->code_size);
+ va = radv_buffer_get_va(ps->bo) + ps->bo_offset;
radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B020_SPI_SHADER_PGM_LO_PS, 4);
radeon_emit(cmd_buffer->cs, va >> 8);
radeon_set_context_reg(cmd_buffer->cs, R_028238_CB_TARGET_MASK, blend->cb_target_mask);
radeon_set_context_reg(cmd_buffer->cs, R_02823C_CB_SHADER_MASK, blend->cb_shader_mask);
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
+ if (cmd_buffer->device->dfsm_allowed) {
/* optimise this? */
radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
}
}
-static void polaris_set_vgt_vertex_reuse(struct radv_cmd_buffer *cmd_buffer,
- struct radv_pipeline *pipeline)
+static void
+radv_emit_vgt_vertex_reuse(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_pipeline *pipeline)
{
- uint32_t vtx_reuse_depth = 30;
+ struct radeon_winsys_cs *cs = cmd_buffer->cs;
+
if (cmd_buffer->device->physical_device->rad_info.family < CHIP_POLARIS10)
return;
- if (pipeline->shaders[MESA_SHADER_TESS_EVAL]) {
- if (pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.spacing == TESS_SPACING_FRACTIONAL_ODD)
- vtx_reuse_depth = 14;
- }
- radeon_set_context_reg(cmd_buffer->cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
- vtx_reuse_depth);
+ radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
+ pipeline->graphics.vtx_reuse_depth);
}
static void
-radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer,
- struct radv_pipeline *pipeline)
+radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer)
{
+ struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
+
if (!pipeline || cmd_buffer->state.emitted_pipeline == pipeline)
return;
radv_emit_tess_shaders(cmd_buffer, pipeline);
radv_emit_geometry_shader(cmd_buffer, pipeline);
radv_emit_fragment_shader(cmd_buffer, pipeline);
- polaris_set_vgt_vertex_reuse(cmd_buffer, pipeline);
+ radv_emit_vgt_vertex_reuse(cmd_buffer, pipeline);
cmd_buffer->scratch_size_needed =
MAX2(cmd_buffer->scratch_size_needed,
}
radeon_set_context_reg(cmd_buffer->cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, pipeline->graphics.gs_out);
+ if (unlikely(cmd_buffer->device->trace_bo))
+ radv_save_pipeline(cmd_buffer, pipeline, RING_GFX);
+
cmd_buffer->state.emitted_pipeline = pipeline;
+
+ cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_PIPELINE;
}
static void
radv_emit_scissor(struct radv_cmd_buffer *cmd_buffer)
{
uint32_t count = cmd_buffer->state.dynamic.scissor.count;
+
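+	/* Presumably a hardware workaround: force the PS to idle before
+	 * rewriting the scissor state on GFX9. */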
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
+ cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
+ si_emit_cache_flush(cmd_buffer);
+ }
si_write_scissors(cmd_buffer->cs, 0, count,
cmd_buffer->state.dynamic.scissor.scissors,
cmd_buffer->state.dynamic.viewport.viewports,
cmd_buffer->state.pipeline->graphics.ms.pa_sc_mode_cntl_0 | S_028A48_VPORT_SCISSOR_ENABLE(count ? 1 : 0));
}
+static void
+radv_emit_line_width(struct radv_cmd_buffer *cmd_buffer)
+{
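+	/* The register takes the half line width in 12.4 fixed point:
+	 * width * 8 == (width / 2) * 16. */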
+ unsigned width = cmd_buffer->state.dynamic.line_width * 8;
+
+ radeon_set_context_reg(cmd_buffer->cs, R_028A08_PA_SU_LINE_CNTL,
+ S_028A08_WIDTH(CLAMP(width, 0, 0xFFF)));
+}
+
+static void
+radv_emit_blend_constants(struct radv_cmd_buffer *cmd_buffer)
+{
+ struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
+
+ radeon_set_context_reg_seq(cmd_buffer->cs, R_028414_CB_BLEND_RED, 4);
+ radeon_emit_array(cmd_buffer->cs, (uint32_t *)d->blend_constants, 4);
+}
+
+static void
+radv_emit_stencil(struct radv_cmd_buffer *cmd_buffer)
+{
+ struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
+
+ radeon_set_context_reg_seq(cmd_buffer->cs,
+ R_028430_DB_STENCILREFMASK, 2);
+ radeon_emit(cmd_buffer->cs,
+ S_028430_STENCILTESTVAL(d->stencil_reference.front) |
+ S_028430_STENCILMASK(d->stencil_compare_mask.front) |
+ S_028430_STENCILWRITEMASK(d->stencil_write_mask.front) |
+ S_028430_STENCILOPVAL(1));
+ radeon_emit(cmd_buffer->cs,
+ S_028434_STENCILTESTVAL_BF(d->stencil_reference.back) |
+ S_028434_STENCILMASK_BF(d->stencil_compare_mask.back) |
+ S_028434_STENCILWRITEMASK_BF(d->stencil_write_mask.back) |
+ S_028434_STENCILOPVAL_BF(1));
+}
+
+static void
+radv_emit_depth_bounds(struct radv_cmd_buffer *cmd_buffer)
+{
+ struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
+
+ radeon_set_context_reg(cmd_buffer->cs, R_028020_DB_DEPTH_BOUNDS_MIN,
+ fui(d->depth_bounds.min));
+ radeon_set_context_reg(cmd_buffer->cs, R_028024_DB_DEPTH_BOUNDS_MAX,
+ fui(d->depth_bounds.max));
+}
+
+static void
+radv_emit_depth_bias(struct radv_cmd_buffer *cmd_buffer)
+{
+ struct radv_raster_state *raster = &cmd_buffer->state.pipeline->graphics.raster;
+ struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
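+	/* The 16.0f factor converts the slope to the hardware's fixed-point
+	 * scale (presumably 1/16th units). */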
+ unsigned slope = fui(d->depth_bias.slope * 16.0f);
+ unsigned bias = fui(d->depth_bias.bias * cmd_buffer->state.offset_scale);
+
+ if (G_028814_POLY_OFFSET_FRONT_ENABLE(raster->pa_su_sc_mode_cntl)) {
+ radeon_set_context_reg_seq(cmd_buffer->cs,
+ R_028B7C_PA_SU_POLY_OFFSET_CLAMP, 5);
+ radeon_emit(cmd_buffer->cs, fui(d->depth_bias.clamp)); /* CLAMP */
+ radeon_emit(cmd_buffer->cs, slope); /* FRONT SCALE */
+ radeon_emit(cmd_buffer->cs, bias); /* FRONT OFFSET */
+ radeon_emit(cmd_buffer->cs, slope); /* BACK SCALE */
+ radeon_emit(cmd_buffer->cs, bias); /* BACK OFFSET */
+ }
+}
+
static void
radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer,
int index,
- struct radv_color_buffer_info *cb)
+ struct radv_attachment_info *att)
{
bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= VI;
+ struct radv_color_buffer_info *cb = &att->cb;
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
radeon_emit(cmd_buffer->cs, cb->cb_dcc_base >> 32);
radeon_set_context_reg(cmd_buffer->cs, R_0287A0_CB_MRT0_EPITCH + index * 4,
- cb->gfx9_epitch);
+ S_0287A0_EPITCH(att->attachment->image->surface.u.gfx9.surf.epitch));
} else {
radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
radeon_emit(cmd_buffer->cs, cb->cb_color_base);
}
radeon_set_context_reg(cmd_buffer->cs, R_028008_DB_DEPTH_VIEW, ds->db_depth_view);
+ radeon_set_context_reg(cmd_buffer->cs, R_028ABC_DB_HTILE_SURFACE, ds->db_htile_surface);
+
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
radeon_set_context_reg_seq(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, 3);
radeon_emit(cmd_buffer->cs, ds->db_depth_size); /* R_028058_DB_DEPTH_SIZE */
radeon_emit(cmd_buffer->cs, ds->db_depth_slice); /* R_02805C_DB_DEPTH_SLICE */
- radeon_set_context_reg(cmd_buffer->cs, R_028ABC_DB_HTILE_SURFACE, ds->db_htile_surface);
}
radeon_set_context_reg(cmd_buffer->cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
VkClearDepthStencilValue ds_clear_value,
VkImageAspectFlags aspects)
{
- uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
+ uint64_t va = radv_buffer_get_va(image->bo);
va += image->offset + image->clear_value_offset;
unsigned reg_offset = 0, reg_count = 0;
- if (!image->surface.htile_size || !aspects)
- return;
+ assert(image->surface.htile_size);
if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
++reg_count;
if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
++reg_count;
- cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);
-
radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0));
radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
S_370_WR_CONFIRM(1) |
radv_load_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
struct radv_image *image)
{
- uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
+ VkImageAspectFlags aspects = vk_format_aspects(image->vk_format);
+ uint64_t va = radv_buffer_get_va(image->bo);
va += image->offset + image->clear_value_offset;
+ unsigned reg_offset = 0, reg_count = 0;
if (!image->surface.htile_size)
return;
- cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);
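+	/* The stencil clear value is stored in the first dword; when only
+	 * depth is present, skip it and start at DB_DEPTH_CLEAR. */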
+ if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
+ ++reg_count;
+ } else {
+ ++reg_offset;
+ va += 4;
+ }
+ if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
+ ++reg_count;
radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, 0));
radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
COPY_DATA_DST_SEL(COPY_DATA_REG) |
- COPY_DATA_COUNT_SEL);
+ (reg_count == 2 ? COPY_DATA_COUNT_SEL : 0));
radeon_emit(cmd_buffer->cs, va);
radeon_emit(cmd_buffer->cs, va >> 32);
- radeon_emit(cmd_buffer->cs, R_028028_DB_STENCIL_CLEAR >> 2);
+ radeon_emit(cmd_buffer->cs, (R_028028_DB_STENCIL_CLEAR + 4 * reg_offset) >> 2);
radeon_emit(cmd_buffer->cs, 0);
radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
bool value)
{
uint64_t pred_val = value;
- uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
+ uint64_t va = radv_buffer_get_va(image->bo);
va += image->offset + image->dcc_pred_offset;
- if (!image->surface.dcc_size)
- return;
-
- cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);
+ assert(image->surface.dcc_size);
radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
int idx,
uint32_t color_values[2])
{
- uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
+ uint64_t va = radv_buffer_get_va(image->bo);
va += image->offset + image->clear_value_offset;
- if (!image->cmask.size && !image->surface.dcc_size)
- return;
-
- cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);
+ assert(image->cmask.size || image->surface.dcc_size);
radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
struct radv_image *image,
int idx)
{
- uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
+ uint64_t va = radv_buffer_get_va(image->bo);
va += image->offset + image->clear_value_offset;
if (!image->cmask.size && !image->surface.dcc_size)
return;
uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + idx * 0x3c;
- cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);
radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, cmd_buffer->state.predicating));
radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
radeon_emit(cmd_buffer->cs, 0);
}
-void
+static void
radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer)
{
int i;
struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
const struct radv_subpass *subpass = cmd_buffer->state.subpass;
+ /* this may happen for inherited secondary recording */
+ if (!framebuffer)
+ return;
+
for (i = 0; i < 8; ++i) {
if (i >= subpass->color_count || subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
radeon_set_context_reg(cmd_buffer->cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
int idx = subpass->color_attachments[i].attachment;
struct radv_attachment_info *att = &framebuffer->attachments[idx];
- cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, att->attachment->bo, 8);
+ radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo, 8);
assert(att->attachment->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT);
- radv_emit_fb_color_state(cmd_buffer, i, &att->cb);
+ radv_emit_fb_color_state(cmd_buffer, i, att);
radv_load_color_clear_regs(cmd_buffer, att->attachment->image, i);
}
VkImageLayout layout = subpass->depth_stencil_attachment.layout;
struct radv_attachment_info *att = &framebuffer->attachments[idx];
struct radv_image *image = att->attachment->image;
- cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, att->attachment->bo, 8);
- uint32_t queue_mask = radv_image_queue_family_mask(image,
- cmd_buffer->queue_family_index,
- cmd_buffer->queue_family_index);
+ radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo, 8);
+ MAYBE_UNUSED uint32_t queue_mask = radv_image_queue_family_mask(image,
+ cmd_buffer->queue_family_index,
+ cmd_buffer->queue_family_index);
/* We currently don't support writing decompressed HTILE */
assert(radv_layout_has_htile(image, layout, queue_mask) ==
radv_layout_is_htile_compressed(image, layout, queue_mask));
}
radv_load_depth_clear_regs(cmd_buffer, image);
} else {
- radeon_set_context_reg_seq(cmd_buffer->cs, R_028040_DB_Z_INFO, 2);
- radeon_emit(cmd_buffer->cs, S_028040_FORMAT(V_028040_Z_INVALID)); /* R_028040_DB_Z_INFO */
- radeon_emit(cmd_buffer->cs, S_028044_FORMAT(V_028044_STENCIL_INVALID)); /* R_028044_DB_STENCIL_INFO */
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
+ radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 2);
+ else
+ radeon_set_context_reg_seq(cmd_buffer->cs, R_028040_DB_Z_INFO, 2);
+
+ radeon_emit(cmd_buffer->cs, S_028040_FORMAT(V_028040_Z_INVALID)); /* DB_Z_INFO */
+ radeon_emit(cmd_buffer->cs, S_028044_FORMAT(V_028044_STENCIL_INVALID)); /* DB_STENCIL_INFO */
}
radeon_set_context_reg(cmd_buffer->cs, R_028208_PA_SC_WINDOW_SCISSOR_BR,
S_028208_BR_X(framebuffer->width) |
S_028208_BR_Y(framebuffer->height));
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
+ if (cmd_buffer->device->dfsm_allowed) {
radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
}
+
+ cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_FRAMEBUFFER;
+}
+
+static void
+radv_emit_index_buffer(struct radv_cmd_buffer *cmd_buffer)
+{
+ struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radv_cmd_state *state = &cmd_buffer->state;
+
+ if (state->index_type != state->last_index_type) {
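+		/* On GFX9 the index type lives in a uconfig register and is
+		 * set with an indexed register write; older chips use the
+		 * dedicated INDEX_TYPE packet. */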
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
+ radeon_set_uconfig_reg_idx(cs, R_03090C_VGT_INDEX_TYPE,
+ 2, state->index_type);
+ } else {
+ radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
+ radeon_emit(cs, state->index_type);
+ }
+
+ state->last_index_type = state->index_type;
+ }
+
+ radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
+ radeon_emit(cs, state->index_va);
+ radeon_emit(cs, state->index_va >> 32);
+
+ radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
+ radeon_emit(cs, state->max_index_count);
+
+ cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_INDEX_BUFFER;
}
void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer)
static void
radv_cmd_buffer_flush_dynamic_state(struct radv_cmd_buffer *cmd_buffer)
{
- struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
-
if (G_028810_DX_RASTERIZATION_KILL(cmd_buffer->state.pipeline->graphics.raster.pa_cl_clip_cntl))
return;
if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_DYNAMIC_SCISSOR | RADV_CMD_DIRTY_DYNAMIC_VIEWPORT))
radv_emit_scissor(cmd_buffer);
- if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH) {
- unsigned width = cmd_buffer->state.dynamic.line_width * 8;
- radeon_set_context_reg(cmd_buffer->cs, R_028A08_PA_SU_LINE_CNTL,
- S_028A08_WIDTH(CLAMP(width, 0, 0xFFF)));
- }
+ if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)
+ radv_emit_line_width(cmd_buffer);
- if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) {
- radeon_set_context_reg_seq(cmd_buffer->cs, R_028414_CB_BLEND_RED, 4);
- radeon_emit_array(cmd_buffer->cs, (uint32_t*)d->blend_constants, 4);
- }
+ if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS)
+ radv_emit_blend_constants(cmd_buffer);
if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE |
RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
- RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK)) {
- radeon_set_context_reg_seq(cmd_buffer->cs, R_028430_DB_STENCILREFMASK, 2);
- radeon_emit(cmd_buffer->cs, S_028430_STENCILTESTVAL(d->stencil_reference.front) |
- S_028430_STENCILMASK(d->stencil_compare_mask.front) |
- S_028430_STENCILWRITEMASK(d->stencil_write_mask.front) |
- S_028430_STENCILOPVAL(1));
- radeon_emit(cmd_buffer->cs, S_028434_STENCILTESTVAL_BF(d->stencil_reference.back) |
- S_028434_STENCILMASK_BF(d->stencil_compare_mask.back) |
- S_028434_STENCILWRITEMASK_BF(d->stencil_write_mask.back) |
- S_028434_STENCILOPVAL_BF(1));
- }
+ RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK))
+ radv_emit_stencil(cmd_buffer);
- if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_PIPELINE |
- RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS)) {
- radeon_set_context_reg(cmd_buffer->cs, R_028020_DB_DEPTH_BOUNDS_MIN, fui(d->depth_bounds.min));
- radeon_set_context_reg(cmd_buffer->cs, R_028024_DB_DEPTH_BOUNDS_MAX, fui(d->depth_bounds.max));
- }
+ if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS)
+ radv_emit_depth_bounds(cmd_buffer);
if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_PIPELINE |
- RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) {
- struct radv_raster_state *raster = &cmd_buffer->state.pipeline->graphics.raster;
- unsigned slope = fui(d->depth_bias.slope * 16.0f);
- unsigned bias = fui(d->depth_bias.bias * cmd_buffer->state.offset_scale);
-
- if (G_028814_POLY_OFFSET_FRONT_ENABLE(raster->pa_su_sc_mode_cntl)) {
- radeon_set_context_reg_seq(cmd_buffer->cs, R_028B7C_PA_SU_POLY_OFFSET_CLAMP, 5);
- radeon_emit(cmd_buffer->cs, fui(d->depth_bias.clamp)); /* CLAMP */
- radeon_emit(cmd_buffer->cs, slope); /* FRONT SCALE */
- radeon_emit(cmd_buffer->cs, bias); /* FRONT OFFSET */
- radeon_emit(cmd_buffer->cs, slope); /* BACK SCALE */
- radeon_emit(cmd_buffer->cs, bias); /* BACK OFFSET */
- }
- }
+ RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS))
+		radv_emit_depth_bias(cmd_buffer);
- cmd_buffer->state.dirty = 0;
+ cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_DYNAMIC_ALL;
}
static void
gl_shader_stage stage)
{
struct ac_userdata_info *desc_set_loc = &pipeline->shaders[stage]->info.user_sgprs_locs.descriptor_sets[idx];
- uint32_t base_reg = radv_shader_stage_to_user_data_0(stage, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
+ uint32_t base_reg = pipeline->user_data_0[stage];
if (desc_set_loc->sgpr_idx == -1 || desc_set_loc->indirect)
return;
radv_flush_push_descriptors(struct radv_cmd_buffer *cmd_buffer)
{
struct radv_descriptor_set *set = &cmd_buffer->push_descriptors.set;
- uint32_t *ptr = NULL;
unsigned bo_offset;
- if (!radv_cmd_buffer_upload_alloc(cmd_buffer, set->size, 32,
- &bo_offset,
- (void**) &ptr))
+ if (!radv_cmd_buffer_upload_data(cmd_buffer, set->size, 32,
+ set->mapped_ptr,
+ &bo_offset))
return;
- set->va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
+ set->va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
set->va += bo_offset;
-
- memcpy(ptr, set->mapped_ptr, set->size);
}
static void
for (unsigned i = 0; i < MAX_SETS; i++) {
uint32_t *uptr = ((uint32_t *)ptr) + i * 2;
uint64_t set_va = 0;
- struct radv_descriptor_set *set = cmd_buffer->state.descriptors[i];
- if (set)
+ struct radv_descriptor_set *set = cmd_buffer->descriptors[i];
+ if (cmd_buffer->state.valid_descriptors & (1u << i))
set_va = set->va;
uptr[0] = set_va & 0xffffffff;
uptr[1] = set_va >> 32;
}
- uint64_t va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
+ uint64_t va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
va += offset;
if (cmd_buffer->state.pipeline) {
cmd_buffer->cs,
MAX_SETS * MESA_SHADER_STAGES * 4);
- for (i = 0; i < MAX_SETS; i++) {
- if (!(cmd_buffer->state.descriptors_dirty & (1u << i)))
- continue;
- struct radv_descriptor_set *set = cmd_buffer->state.descriptors[i];
- if (!set)
+ for_each_bit(i, cmd_buffer->state.descriptors_dirty) {
+ struct radv_descriptor_set *set = cmd_buffer->descriptors[i];
+ if (!(cmd_buffer->state.valid_descriptors & (1u << i)))
continue;
radv_emit_descriptor_set_userdata(cmd_buffer, stages, set, i);
}
cmd_buffer->state.descriptors_dirty = 0;
cmd_buffer->state.push_descriptors_dirty = false;
+
+ if (unlikely(cmd_buffer->device->trace_bo))
+ radv_save_descriptors(cmd_buffer);
+
assert(cmd_buffer->cs->cdw <= cdw_max);
}
uint64_t va;
stages &= cmd_buffer->push_constant_stages;
- if (!stages || !layout || (!layout->push_constant_size && !layout->dynamic_offset_count))
+ if (!stages ||
+ (!layout->push_constant_size && !layout->dynamic_offset_count))
return;
if (!radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size +
memcpy((char*)ptr + layout->push_constant_size, cmd_buffer->dynamic_buffers,
16 * layout->dynamic_offset_count);
- va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
+ va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
va += offset;
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
assert(cmd_buffer->cs->cdw <= cdw_max);
}
-static void radv_emit_primitive_reset_state(struct radv_cmd_buffer *cmd_buffer,
- bool indexed_draw)
-{
- int32_t primitive_reset_en = indexed_draw && cmd_buffer->state.pipeline->graphics.prim_restart_enable;
-
- if (primitive_reset_en != cmd_buffer->state.last_primitive_reset_en) {
- cmd_buffer->state.last_primitive_reset_en = primitive_reset_en;
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
- radeon_set_uconfig_reg(cmd_buffer->cs, R_03092C_VGT_MULTI_PRIM_IB_RESET_EN,
- primitive_reset_en);
- } else {
- radeon_set_context_reg(cmd_buffer->cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN,
- primitive_reset_en);
- }
- }
-
- if (primitive_reset_en) {
- uint32_t primitive_reset_index = cmd_buffer->state.index_type ? 0xffffffffu : 0xffffu;
-
- if (primitive_reset_index != cmd_buffer->state.last_primitive_reset_index) {
- cmd_buffer->state.last_primitive_reset_index = primitive_reset_index;
- radeon_set_context_reg(cmd_buffer->cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
- primitive_reset_index);
- }
- }
-}
-
-static void
-radv_cmd_buffer_update_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer)
+static bool
+radv_cmd_buffer_update_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer, bool pipeline_is_dirty)
{
- struct radv_device *device = cmd_buffer->device;
-
- if ((cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline || cmd_buffer->state.vb_dirty) &&
- cmd_buffer->state.pipeline->num_vertex_attribs &&
- cmd_buffer->state.pipeline->shaders[MESA_SHADER_VERTEX]->info.info.vs.has_vertex_buffers) {
+ if ((pipeline_is_dirty ||
+ (cmd_buffer->state.dirty & RADV_CMD_DIRTY_VERTEX_BUFFER)) &&
+ cmd_buffer->state.pipeline->vertex_elements.count &&
+ radv_get_vertex_shader(cmd_buffer->state.pipeline)->info.info.vs.has_vertex_buffers) {
+ struct radv_vertex_elements_info *velems = &cmd_buffer->state.pipeline->vertex_elements;
unsigned vb_offset;
void *vb_ptr;
uint32_t i = 0;
- uint32_t num_attribs = cmd_buffer->state.pipeline->num_vertex_attribs;
+ uint32_t count = velems->count;
uint64_t va;
/* allocate some descriptor state for vertex buffers */
- radv_cmd_buffer_upload_alloc(cmd_buffer, num_attribs * 16, 256,
- &vb_offset, &vb_ptr);
+ if (!radv_cmd_buffer_upload_alloc(cmd_buffer, count * 16, 256,
+ &vb_offset, &vb_ptr))
+ return false;
- for (i = 0; i < num_attribs; i++) {
+ for (i = 0; i < count; i++) {
uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4];
uint32_t offset;
- int vb = cmd_buffer->state.pipeline->va_binding[i];
- struct radv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
+ int vb = velems->binding[i];
+ struct radv_buffer *buffer = cmd_buffer->vertex_bindings[vb].buffer;
uint32_t stride = cmd_buffer->state.pipeline->binding_stride[vb];
- device->ws->cs_add_buffer(cmd_buffer->cs, buffer->bo, 8);
- va = device->ws->buffer_get_va(buffer->bo);
+ va = radv_buffer_get_va(buffer->bo);
- offset = cmd_buffer->state.vertex_bindings[vb].offset + cmd_buffer->state.pipeline->va_offset[i];
+ offset = cmd_buffer->vertex_bindings[vb].offset + velems->offset[i];
va += offset + buffer->offset;
desc[0] = va;
desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride);
if (cmd_buffer->device->physical_device->rad_info.chip_class <= CIK && stride)
- desc[2] = (buffer->size - offset - cmd_buffer->state.pipeline->va_format_size[i]) / stride + 1;
+ desc[2] = (buffer->size - offset - velems->format_size[i]) / stride + 1;
else
desc[2] = buffer->size - offset;
- desc[3] = cmd_buffer->state.pipeline->va_rsrc_word3[i];
+ desc[3] = velems->rsrc_word3[i];
}
- va = device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
+ va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
va += vb_offset;
radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
AC_UD_VS_VERTEX_BUFFERS, va);
+
+ cmd_buffer->state.vb_va = va;
+ cmd_buffer->state.vb_size = count * 16;
+ cmd_buffer->state.vb_prefetch_dirty = true;
}
- cmd_buffer->state.vb_dirty = 0;
+ cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_VERTEX_BUFFER;
+
+ return true;
}
-static void
-radv_cmd_buffer_flush_state(struct radv_cmd_buffer *cmd_buffer,
- bool indexed_draw, bool instanced_draw,
- bool indirect_draw,
- uint32_t draw_vertex_count)
+static bool
+radv_upload_graphics_shader_descriptors(struct radv_cmd_buffer *cmd_buffer, bool pipeline_is_dirty)
{
- struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
- uint32_t ia_multi_vgt_param;
-
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
- cmd_buffer->cs, 4096);
-
- radv_cmd_buffer_update_vertex_descriptors(cmd_buffer);
+ if (!radv_cmd_buffer_update_vertex_descriptors(cmd_buffer, pipeline_is_dirty))
+ return false;
- if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE)
- radv_emit_graphics_pipeline(cmd_buffer, pipeline);
+ radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
+ radv_flush_constants(cmd_buffer, cmd_buffer->state.pipeline,
+ VK_SHADER_STAGE_ALL_GRAPHICS);
- if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_RENDER_TARGETS)
- radv_emit_framebuffer_state(cmd_buffer);
+ return true;
+}
- ia_multi_vgt_param = si_get_ia_multi_vgt_param(cmd_buffer, instanced_draw, indirect_draw, draw_vertex_count);
- if (cmd_buffer->state.last_ia_multi_vgt_param != ia_multi_vgt_param) {
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
- radeon_set_uconfig_reg_idx(cmd_buffer->cs, R_030960_IA_MULTI_VGT_PARAM, 4, ia_multi_vgt_param);
- else if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK)
- radeon_set_context_reg_idx(cmd_buffer->cs, R_028AA8_IA_MULTI_VGT_PARAM, 1, ia_multi_vgt_param);
- else
- radeon_set_context_reg(cmd_buffer->cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
- cmd_buffer->state.last_ia_multi_vgt_param = ia_multi_vgt_param;
+static void
+radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, bool indexed_draw,
+ bool instanced_draw, bool indirect_draw,
+ uint32_t draw_vertex_count)
+{
+ struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
+ struct radv_cmd_state *state = &cmd_buffer->state;
+ struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ uint32_t ia_multi_vgt_param;
+ int32_t primitive_reset_en;
+
+ /* Draw state. */
+ ia_multi_vgt_param =
+ si_get_ia_multi_vgt_param(cmd_buffer, instanced_draw,
+ indirect_draw, draw_vertex_count);
+
+ if (state->last_ia_multi_vgt_param != ia_multi_vgt_param) {
+ if (info->chip_class >= GFX9) {
+ radeon_set_uconfig_reg_idx(cs,
+ R_030960_IA_MULTI_VGT_PARAM,
+ 4, ia_multi_vgt_param);
+ } else if (info->chip_class >= CIK) {
+ radeon_set_context_reg_idx(cs,
+ R_028AA8_IA_MULTI_VGT_PARAM,
+ 1, ia_multi_vgt_param);
+ } else {
+ radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM,
+ ia_multi_vgt_param);
+ }
+ state->last_ia_multi_vgt_param = ia_multi_vgt_param;
}
- radv_cmd_buffer_flush_dynamic_state(cmd_buffer);
-
- radv_emit_primitive_reset_state(cmd_buffer, indexed_draw);
+ /* Primitive restart. */
+ primitive_reset_en =
+ indexed_draw && state->pipeline->graphics.prim_restart_enable;
- radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
- radv_flush_constants(cmd_buffer, cmd_buffer->state.pipeline,
- VK_SHADER_STAGE_ALL_GRAPHICS);
+ if (primitive_reset_en != state->last_primitive_reset_en) {
+ state->last_primitive_reset_en = primitive_reset_en;
+ if (info->chip_class >= GFX9) {
+ radeon_set_uconfig_reg(cs,
+ R_03092C_VGT_MULTI_PRIM_IB_RESET_EN,
+ primitive_reset_en);
+ } else {
+ radeon_set_context_reg(cs,
+ R_028A94_VGT_MULTI_PRIM_IB_RESET_EN,
+ primitive_reset_en);
+ }
+ }
- assert(cmd_buffer->cs->cdw <= cdw_max);
+ if (primitive_reset_en) {
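+		/* A non-zero index_type means 32-bit indices, which restart
+		 * on 0xffffffff; 16-bit indices restart on 0xffff. */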
+ uint32_t primitive_reset_index =
+ state->index_type ? 0xffffffffu : 0xffffu;
- si_emit_cache_flush(cmd_buffer);
+ if (primitive_reset_index != state->last_primitive_reset_index) {
+ radeon_set_context_reg(cs,
+ R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
+ primitive_reset_index);
+ state->last_primitive_reset_index = primitive_reset_index;
+ }
+ }
}
static void radv_stage_flush(struct radv_cmd_buffer *cmd_buffer,
VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT |
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
- } else if (src_stage_mask & (VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT |
- VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
+ } else if (src_stage_mask & (VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT)) {
cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VS_PARTIAL_FLUSH;
cmd_buffer->state.subpass = subpass;
- cmd_buffer->state.dirty |= RADV_CMD_DIRTY_RENDER_TARGETS;
+ cmd_buffer->state.dirty |= RADV_CMD_DIRTY_FRAMEBUFFER;
}
-static void
+static VkResult
radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer,
struct radv_render_pass *pass,
const VkRenderPassBeginInfo *info)
if (pass->attachment_count == 0) {
state->attachments = NULL;
- return;
+ return VK_SUCCESS;
}
state->attachments = vk_alloc(&cmd_buffer->pool->alloc,
sizeof(state->attachments[0]),
8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (state->attachments == NULL) {
- /* FIXME: Propagate VK_ERROR_OUT_OF_HOST_MEMORY to vkEndCommandBuffer */
- abort();
+ cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
+ return cmd_buffer->record_result;
}
for (uint32_t i = 0; i < pass->attachment_count; ++i) {
}
state->attachments[i].pending_clear_aspects = clear_aspects;
+ state->attachments[i].cleared_views = 0;
if (clear_aspects && info) {
assert(info->clearValueCount > i);
state->attachments[i].clear_value = info->pClearValues[i];
state->attachments[i].current_layout = att->initial_layout;
}
+
+ return VK_SUCCESS;
}
VkResult radv_AllocateCommandBuffers(
VkResult result = VK_SUCCESS;
uint32_t i;
- memset(pCommandBuffers, 0,
- sizeof(*pCommandBuffers)*pAllocateInfo->commandBufferCount);
-
for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
if (!list_empty(&pool->free_cmd_buffers)) {
list_del(&cmd_buffer->pool_link);
list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
- radv_reset_cmd_buffer(cmd_buffer);
+ result = radv_reset_cmd_buffer(cmd_buffer);
cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
cmd_buffer->level = pAllocateInfo->level;
pCommandBuffers[i] = radv_cmd_buffer_to_handle(cmd_buffer);
- result = VK_SUCCESS;
} else {
result = radv_create_cmd_buffer(device, pool, pAllocateInfo->level,
&pCommandBuffers[i]);
break;
}
- if (result != VK_SUCCESS)
+ if (result != VK_SUCCESS) {
radv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
i, pCommandBuffers);
+ /* From the Vulkan 1.0.66 spec:
+ *
+ * "vkAllocateCommandBuffers can be used to create multiple
+ * command buffers. If the creation of any of those command
+ * buffers fails, the implementation must destroy all
+ * successfully created command buffer objects from this
+ * command, set all entries of the pCommandBuffers array to
+ * NULL and return the error."
+ */
+ memset(pCommandBuffers, 0,
+ sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
+ }
+
return result;
}
VkCommandBufferResetFlags flags)
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- radv_reset_cmd_buffer(cmd_buffer);
- return VK_SUCCESS;
+ return radv_reset_cmd_buffer(cmd_buffer);
}
static void emit_gfx_buffer_state(struct radv_cmd_buffer *cmd_buffer)
{
struct radv_device *device = cmd_buffer->device;
if (device->gfx_init) {
- uint64_t va = device->ws->buffer_get_va(device->gfx_init);
- device->ws->cs_add_buffer(cmd_buffer->cs, device->gfx_init, 8);
+ uint64_t va = radv_buffer_get_va(device->gfx_init);
+ radv_cs_add_buffer(device->ws, cmd_buffer->cs, device->gfx_init, 8);
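+		/* Execute the device's pre-recorded gfx initialization IB
+		 * via an INDIRECT_BUFFER packet instead of re-emitting all
+		 * of the initial register state.
+		 */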
radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
radeon_emit(cmd_buffer->cs, va);
radeon_emit(cmd_buffer->cs, va >> 32);
const VkCommandBufferBeginInfo *pBeginInfo)
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- radv_reset_cmd_buffer(cmd_buffer);
+ VkResult result = VK_SUCCESS;
+
+ if (cmd_buffer->status != RADV_CMD_BUFFER_STATUS_INITIAL) {
+	/* If the command buffer has already been reset with
+	 * vkResetCommandBuffer, there is no need to do it again.
+ */
+ result = radv_reset_cmd_buffer(cmd_buffer);
+ if (result != VK_SUCCESS)
+ return result;
+ }
memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
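+	/* A value of -1 means "unknown" and forces the state to be re-emitted. */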
cmd_buffer->state.last_primitive_reset_en = -1;
+ cmd_buffer->state.last_index_type = -1;
+ cmd_buffer->usage_flags = pBeginInfo->flags;
/* setup initial configuration into command buffer */
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
switch (cmd_buffer->queue_family_index) {
case RADV_QUEUE_GENERAL:
emit_gfx_buffer_state(cmd_buffer);
- radv_set_db_count_control(cmd_buffer);
break;
case RADV_QUEUE_COMPUTE:
si_init_compute(cmd_buffer);
}
if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
+ assert(pBeginInfo->pInheritanceInfo);
cmd_buffer->state.framebuffer = radv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
cmd_buffer->state.pass = radv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
struct radv_subpass *subpass =
&cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
- radv_cmd_state_setup_attachments(cmd_buffer, cmd_buffer->state.pass, NULL);
+ result = radv_cmd_state_setup_attachments(cmd_buffer, cmd_buffer->state.pass, NULL);
+ if (result != VK_SUCCESS)
+ return result;
+
radv_cmd_buffer_set_subpass(cmd_buffer, subpass, false);
}
- radv_cmd_buffer_trace_emit(cmd_buffer);
- return VK_SUCCESS;
+ if (unlikely(cmd_buffer->device->trace_bo))
+ radv_cmd_buffer_trace_emit(cmd_buffer);
+
+ cmd_buffer->status = RADV_CMD_BUFFER_STATUS_RECORDING;
+
+ return result;
}
void radv_CmdBindVertexBuffers(
const VkDeviceSize* pOffsets)
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- struct radv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;
+ struct radv_vertex_binding *vb = cmd_buffer->vertex_bindings;
+ bool changed = false;
/* We have to defer setting up vertex buffer since we need the buffer
* stride from the pipeline. */
- assert(firstBinding + bindingCount < MAX_VBS);
+ assert(firstBinding + bindingCount <= MAX_VBS);
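+	/* Track whether any binding actually changed, so that unchanged
+	 * rebinds don't dirty the vertex buffer state.
+	 */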
for (uint32_t i = 0; i < bindingCount; i++) {
- vb[firstBinding + i].buffer = radv_buffer_from_handle(pBuffers[i]);
- vb[firstBinding + i].offset = pOffsets[i];
- cmd_buffer->state.vb_dirty |= 1 << (firstBinding + i);
+ uint32_t idx = firstBinding + i;
+
+ if (!changed &&
+ (vb[idx].buffer != radv_buffer_from_handle(pBuffers[i]) ||
+ vb[idx].offset != pOffsets[i])) {
+ changed = true;
+ }
+
+ vb[idx].buffer = radv_buffer_from_handle(pBuffers[i]);
+ vb[idx].offset = pOffsets[i];
+
+ radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
+ vb[idx].buffer->bo, 8);
}
+
+ if (!changed) {
+ /* No state changes. */
+ return;
+ }
+
+ cmd_buffer->state.dirty |= RADV_CMD_DIRTY_VERTEX_BUFFER;
}
void radv_CmdBindIndexBuffer(
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
RADV_FROM_HANDLE(radv_buffer, index_buffer, buffer);
+ if (cmd_buffer->state.index_buffer == index_buffer &&
+ cmd_buffer->state.index_offset == offset &&
+ cmd_buffer->state.index_type == indexType) {
+ /* No state changes. */
+ return;
+ }
+
+ cmd_buffer->state.index_buffer = index_buffer;
+ cmd_buffer->state.index_offset = offset;
cmd_buffer->state.index_type = indexType; /* vk matches hw */
- cmd_buffer->state.index_va = cmd_buffer->device->ws->buffer_get_va(index_buffer->bo);
+ cmd_buffer->state.index_va = radv_buffer_get_va(index_buffer->bo);
cmd_buffer->state.index_va += index_buffer->offset + offset;
int index_size_shift = cmd_buffer->state.index_type ? 2 : 1;
cmd_buffer->state.max_index_count = (index_buffer->size - offset) >> index_size_shift;
cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
- cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, index_buffer->bo, 8);
+ radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, index_buffer->bo, 8);
}
-void radv_bind_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
- struct radv_descriptor_set *set,
- unsigned idx)
+static void
+radv_bind_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_descriptor_set *set, unsigned idx)
{
struct radeon_winsys *ws = cmd_buffer->device->ws;
- cmd_buffer->state.descriptors[idx] = set;
- cmd_buffer->state.descriptors_dirty |= (1u << idx);
+ radv_set_descriptor_set(cmd_buffer, set, idx);
if (!set)
return;
for (unsigned j = 0; j < set->layout->buffer_count; ++j)
if (set->descriptors[j])
- ws->cs_add_buffer(cmd_buffer->cs, set->descriptors[j], 7);
+ radv_cs_add_buffer(ws, cmd_buffer->cs, set->descriptors[j], 7);
if(set->bo)
- ws->cs_add_buffer(cmd_buffer->cs, set->bo, 8);
+ radv_cs_add_buffer(ws, cmd_buffer->cs, set->bo, 8);
}
void radv_CmdBindDescriptorSets(
if (!set->mapped_ptr) {
cmd_buffer->push_descriptors.capacity = 0;
- cmd_buffer->record_fail = true;
+ cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
return false;
}
struct radv_descriptor_set *push_set = &cmd_buffer->meta_push_descriptors;
unsigned bo_offset;
+ assert(set == 0);
assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
push_set->size = layout->set[set].layout->size;
(void**) &push_set->mapped_ptr))
return;
- push_set->va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
+ push_set->va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
push_set->va += bo_offset;
radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
radv_descriptor_set_to_handle(push_set),
descriptorWriteCount, pDescriptorWrites, 0, NULL);
- cmd_buffer->state.descriptors[set] = push_set;
- cmd_buffer->state.descriptors_dirty |= (1u << set);
+ radv_set_descriptor_set(cmd_buffer, push_set, set);
}
void radv_CmdPushDescriptorSetKHR(
radv_descriptor_set_to_handle(push_set),
descriptorWriteCount, pDescriptorWrites, 0, NULL);
- cmd_buffer->state.descriptors[set] = push_set;
- cmd_buffer->state.descriptors_dirty |= (1u << set);
+ radv_set_descriptor_set(cmd_buffer, push_set, set);
cmd_buffer->state.push_descriptors_dirty = true;
}
radv_update_descriptor_set_with_template(cmd_buffer->device, cmd_buffer, push_set,
descriptorUpdateTemplate, pData);
- cmd_buffer->state.descriptors[set] = push_set;
- cmd_buffer->state.descriptors_dirty |= (1u << set);
+ radv_set_descriptor_set(cmd_buffer, push_set, set);
cmd_buffer->state.push_descriptors_dirty = true;
}
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- if (cmd_buffer->queue_family_index != RADV_QUEUE_TRANSFER)
+ if (cmd_buffer->queue_family_index != RADV_QUEUE_TRANSFER) {
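+		/* On SI, wait for all shader work to finish and write back
+		 * the L2 cache before the IB ends.
+		 */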
+ if (cmd_buffer->device->physical_device->rad_info.chip_class == SI)
+			cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
+							RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
+							RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
si_emit_cache_flush(cmd_buffer);
+ }
- if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs) ||
- cmd_buffer->record_fail)
- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
- return VK_SUCCESS;
+ vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
+
+ if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs))
+ return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+
+ cmd_buffer->status = RADV_CMD_BUFFER_STATUS_EXECUTABLE;
+
+ return cmd_buffer->record_result;
}
static void
radv_emit_compute_pipeline(struct radv_cmd_buffer *cmd_buffer)
{
- struct radeon_winsys *ws = cmd_buffer->device->ws;
struct radv_shader_variant *compute_shader;
struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+ struct radv_device *device = cmd_buffer->device;
+ unsigned compute_resource_limits;
+ unsigned waves_per_threadgroup;
uint64_t va;
if (!pipeline || pipeline == cmd_buffer->state.emitted_compute_pipeline)
cmd_buffer->state.emitted_compute_pipeline = pipeline;
compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
- va = ws->buffer_get_va(compute_shader->bo);
-
- ws->cs_add_buffer(cmd_buffer->cs, compute_shader->bo, 8);
- radv_emit_prefetch(cmd_buffer, va, compute_shader->code_size);
+ va = radv_buffer_get_va(compute_shader->bo) + compute_shader->bo_offset;
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
- cmd_buffer->cs, 16);
+ cmd_buffer->cs, 19);
radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B830_COMPUTE_PGM_LO, 2);
radeon_emit(cmd_buffer->cs, va >> 8);
S_00B860_WAVES(pipeline->max_waves) |
S_00B860_WAVESIZE(pipeline->scratch_bytes_per_wave >> 10));
+ /* Calculate best compute resource limits. */
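+	/* A wave is 64 lanes on GCN, so this is the threadgroup size in waves. */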
+ waves_per_threadgroup =
+ DIV_ROUND_UP(compute_shader->info.cs.block_size[0] *
+ compute_shader->info.cs.block_size[1] *
+ compute_shader->info.cs.block_size[2], 64);
+ compute_resource_limits =
+ S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup % 4 == 0);
+
+ if (device->physical_device->rad_info.chip_class >= CIK) {
+ unsigned num_cu_per_se =
+ device->physical_device->rad_info.num_good_compute_units /
+ device->physical_device->rad_info.max_se;
+
+		/* Force even distribution across all SIMDs in the CU if the
+		 * workgroup size is 64. This has shown good improvements when
+		 * the number of CUs per SE is not a multiple of 4.
+		 */
+ if (num_cu_per_se % 4 && waves_per_threadgroup == 1)
+ compute_resource_limits |= S_00B854_FORCE_SIMD_DIST(1);
+ }
+
+ radeon_set_sh_reg(cmd_buffer->cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
+ compute_resource_limits);
+
radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
radeon_emit(cmd_buffer->cs,
S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[0]));
S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[2]));
assert(cmd_buffer->cs->cdw <= cdw_max);
+
+ if (unlikely(cmd_buffer->device->trace_bo))
+ radv_save_pipeline(cmd_buffer, pipeline, RING_COMPUTE);
}
static void radv_mark_descriptor_sets_dirty(struct radv_cmd_buffer *cmd_buffer)
{
- for (unsigned i = 0; i < MAX_SETS; i++) {
- if (cmd_buffer->state.descriptors[i])
- cmd_buffer->state.descriptors_dirty |= (1u << i);
- }
+ cmd_buffer->state.descriptors_dirty |= cmd_buffer->state.valid_descriptors;
}
void radv_CmdBindPipeline(
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
- radv_mark_descriptor_sets_dirty(cmd_buffer);
-
switch (pipelineBindPoint) {
case VK_PIPELINE_BIND_POINT_COMPUTE:
+ if (cmd_buffer->state.compute_pipeline == pipeline)
+ return;
+ radv_mark_descriptor_sets_dirty(cmd_buffer);
+
cmd_buffer->state.compute_pipeline = pipeline;
cmd_buffer->push_constant_stages |= VK_SHADER_STAGE_COMPUTE_BIT;
break;
case VK_PIPELINE_BIND_POINT_GRAPHICS:
+ if (cmd_buffer->state.pipeline == pipeline)
+ return;
+ radv_mark_descriptor_sets_dirty(cmd_buffer);
+
cmd_buffer->state.pipeline = pipeline;
if (!pipeline)
break;
cmd_buffer->state.dirty |= RADV_CMD_DIRTY_PIPELINE;
cmd_buffer->push_constant_stages |= pipeline->active_stages;
- /* Apply the dynamic state from the pipeline */
- cmd_buffer->state.dirty |= pipeline->dynamic_state_mask;
- radv_dynamic_state_copy(&cmd_buffer->state.dynamic,
- &pipeline->dynamic_state,
- pipeline->dynamic_state_mask);
+ radv_bind_dynamic_state(cmd_buffer, &pipeline->dynamic_state);
if (pipeline->graphics.esgs_ring_size > cmd_buffer->esgs_ring_size_needed)
cmd_buffer->esgs_ring_size_needed = pipeline->graphics.esgs_ring_size;
const VkViewport* pViewports)
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
-	const uint32_t total_count = firstViewport + viewportCount;
+	MAYBE_UNUSED const uint32_t total_count = firstViewport + viewportCount;
- if (cmd_buffer->state.dynamic.viewport.count < total_count)
- cmd_buffer->state.dynamic.viewport.count = total_count;
+ assert(firstViewport < MAX_VIEWPORTS);
+ assert(total_count >= 1 && total_count <= MAX_VIEWPORTS);
memcpy(cmd_buffer->state.dynamic.viewport.viewports + firstViewport,
pViewports, viewportCount * sizeof(*pViewports));
const VkRect2D* pScissors)
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
-	const uint32_t total_count = firstScissor + scissorCount;
+	MAYBE_UNUSED const uint32_t total_count = firstScissor + scissorCount;
- if (cmd_buffer->state.dynamic.scissor.count < total_count)
- cmd_buffer->state.dynamic.scissor.count = total_count;
+ assert(firstScissor < MAX_SCISSORS);
+ assert(total_count >= 1 && total_count <= MAX_SCISSORS);
memcpy(cmd_buffer->state.dynamic.scissor.scissors + firstScissor,
pScissors, scissorCount * sizeof(*pScissors));
{
RADV_FROM_HANDLE(radv_cmd_buffer, primary, commandBuffer);
+ assert(commandBufferCount > 0);
+
/* Emit pending flushes on primary prior to executing secondary */
si_emit_cache_flush(primary);
assert(secondary->ring_offsets_idx == primary->ring_offsets_idx);
}
primary->device->ws->cs_execute_secondary(primary->cs, secondary->cs);
- }
- /* if we execute secondary we need to re-emit out pipelines */
- if (commandBufferCount) {
- primary->state.emitted_pipeline = NULL;
- primary->state.emitted_compute_pipeline = NULL;
- primary->state.dirty |= RADV_CMD_DIRTY_PIPELINE;
- primary->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_ALL;
- primary->state.last_primitive_reset_en = -1;
- primary->state.last_primitive_reset_index = 0;
- radv_mark_descriptor_sets_dirty(primary);
+
+		/* When the secondary command buffer is compute-only, we don't
+		 * need to re-emit the current graphics pipeline.
+		 */
+ if (secondary->state.emitted_pipeline) {
+ primary->state.emitted_pipeline =
+ secondary->state.emitted_pipeline;
+ }
+
+		/* When the secondary command buffer is graphics-only, we don't
+		 * need to re-emit the current compute pipeline.
+		 */
+ if (secondary->state.emitted_compute_pipeline) {
+ primary->state.emitted_compute_pipeline =
+ secondary->state.emitted_compute_pipeline;
+ }
+
+ /* Only re-emit the draw packets when needed. */
+ if (secondary->state.last_primitive_reset_en != -1) {
+ primary->state.last_primitive_reset_en =
+ secondary->state.last_primitive_reset_en;
+ }
+
+ if (secondary->state.last_primitive_reset_index) {
+ primary->state.last_primitive_reset_index =
+ secondary->state.last_primitive_reset_index;
+ }
+
+ if (secondary->state.last_ia_multi_vgt_param) {
+ primary->state.last_ia_multi_vgt_param =
+ secondary->state.last_ia_multi_vgt_param;
+ }
+
+ if (secondary->state.last_index_type != -1) {
+ primary->state.last_index_type =
+ secondary->state.last_index_type;
+ }
}
+
+	/* After executing commands from secondary buffers we have to mark
+	 * some states as dirty again.
+	 */
+ primary->state.dirty |= RADV_CMD_DIRTY_PIPELINE |
+ RADV_CMD_DIRTY_INDEX_BUFFER |
+ RADV_CMD_DIRTY_DYNAMIC_ALL;
+ radv_mark_descriptor_sets_dirty(primary);
}
VkResult radv_CreateCommandPool(
VkCommandPoolResetFlags flags)
{
RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
+ VkResult result;
list_for_each_entry(struct radv_cmd_buffer, cmd_buffer,
&pool->cmd_buffers, pool_link) {
- radv_reset_cmd_buffer(cmd_buffer);
+ result = radv_reset_cmd_buffer(cmd_buffer);
+ if (result != VK_SUCCESS)
+ return result;
}
return VK_SUCCESS;
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
cmd_buffer->cs, 2048);
+ MAYBE_UNUSED VkResult result;
cmd_buffer->state.framebuffer = framebuffer;
cmd_buffer->state.pass = pass;
cmd_buffer->state.render_area = pRenderPassBegin->renderArea;
- radv_cmd_state_setup_attachments(cmd_buffer, pass, pRenderPassBegin);
+
+ result = radv_cmd_state_setup_attachments(cmd_buffer, pass, pRenderPassBegin);
+ if (result != VK_SUCCESS)
+ return;
radv_cmd_buffer_set_subpass(cmd_buffer, pass->subpasses, true);
assert(cmd_buffer->cs->cdw <= cdw_max);
radv_cmd_buffer_clear_subpass(cmd_buffer);
}
-void radv_CmdDraw(
- VkCommandBuffer commandBuffer,
- uint32_t vertexCount,
- uint32_t instanceCount,
- uint32_t firstVertex,
- uint32_t firstInstance)
+static void radv_emit_view_index(struct radv_cmd_buffer *cmd_buffer, unsigned index)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
-
- radv_cmd_buffer_flush_state(cmd_buffer, false, (instanceCount > 1), false, vertexCount);
-
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 10);
-
- assert(cmd_buffer->state.pipeline->graphics.vtx_base_sgpr);
- radeon_set_sh_reg_seq(cmd_buffer->cs, cmd_buffer->state.pipeline->graphics.vtx_base_sgpr,
- cmd_buffer->state.pipeline->graphics.vtx_emit_num);
- radeon_emit(cmd_buffer->cs, firstVertex);
- radeon_emit(cmd_buffer->cs, firstInstance);
- if (cmd_buffer->state.pipeline->graphics.vtx_emit_num == 3)
- radeon_emit(cmd_buffer->cs, 0);
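+	/* For multiview, write the view index into the VIEW_INDEX user
+	 * SGPR of every active shader stage.
+	 */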
+ struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ for (unsigned stage = 0; stage < MESA_SHADER_STAGES; ++stage) {
+ if (!pipeline->shaders[stage])
+ continue;
+ struct ac_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, AC_UD_VIEW_INDEX);
+ if (loc->sgpr_idx == -1)
+ continue;
+ uint32_t base_reg = pipeline->user_data_0[stage];
+ radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index);
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_NUM_INSTANCES, 0, cmd_buffer->state.predicating));
- radeon_emit(cmd_buffer->cs, instanceCount);
+ }
+ if (pipeline->gs_copy_shader) {
+ struct ac_userdata_info *loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_VIEW_INDEX];
+ if (loc->sgpr_idx != -1) {
+ uint32_t base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0;
+ radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index);
+ }
+ }
+}
+static void
+radv_cs_emit_draw_packet(struct radv_cmd_buffer *cmd_buffer,
+ uint32_t vertex_count)
+{
radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, cmd_buffer->state.predicating));
- radeon_emit(cmd_buffer->cs, vertexCount);
+ radeon_emit(cmd_buffer->cs, vertex_count);
radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
- S_0287F0_USE_OPAQUE(0));
-
- assert(cmd_buffer->cs->cdw <= cdw_max);
+ S_0287F0_USE_OPAQUE(0));
+}
- radv_cmd_buffer_trace_emit(cmd_buffer);
+static void
+radv_cs_emit_draw_indexed_packet(struct radv_cmd_buffer *cmd_buffer,
+ uint64_t index_va,
+ uint32_t index_count)
+{
+ radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_2, 4, false));
+ radeon_emit(cmd_buffer->cs, cmd_buffer->state.max_index_count);
+ radeon_emit(cmd_buffer->cs, index_va);
+ radeon_emit(cmd_buffer->cs, index_va >> 32);
+ radeon_emit(cmd_buffer->cs, index_count);
+ radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_DMA);
}
-void radv_CmdDrawIndexed(
- VkCommandBuffer commandBuffer,
- uint32_t indexCount,
- uint32_t instanceCount,
- uint32_t firstIndex,
- int32_t vertexOffset,
- uint32_t firstInstance)
+static void
+radv_cs_emit_indirect_draw_packet(struct radv_cmd_buffer *cmd_buffer,
+ bool indexed,
+ uint32_t draw_count,
+ uint64_t count_va,
+ uint32_t stride)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- int index_size = cmd_buffer->state.index_type ? 4 : 2;
- uint64_t index_va;
+ struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA
+ : V_0287F0_DI_SRC_SEL_AUTO_INDEX;
+ bool draw_id_enable = radv_get_vertex_shader(cmd_buffer->state.pipeline)->info.info.vs.needs_draw_id;
+ uint32_t base_reg = cmd_buffer->state.pipeline->graphics.vtx_base_sgpr;
+ assert(base_reg);
- radv_cmd_buffer_flush_state(cmd_buffer, true, (instanceCount > 1), false, indexCount);
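+	/* A single draw without a draw id or an indirect count can use the
+	 * cheaper non-MULTI indirect packet.
+	 */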
+ if (draw_count == 1 && !count_va && !draw_id_enable) {
+ radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT :
+ PKT3_DRAW_INDIRECT, 3, false));
+ radeon_emit(cs, 0);
+ radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
+ radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
+ radeon_emit(cs, di_src_sel);
+ } else {
+ radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
+ PKT3_DRAW_INDIRECT_MULTI,
+ 8, false));
+ radeon_emit(cs, 0);
+ radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
+ radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
+ radeon_emit(cs, (((base_reg + 8) - SI_SH_REG_OFFSET) >> 2) |
+ S_2C3_DRAW_INDEX_ENABLE(draw_id_enable) |
+ S_2C3_COUNT_INDIRECT_ENABLE(!!count_va));
+ radeon_emit(cs, draw_count); /* count */
+ radeon_emit(cs, count_va); /* count_addr */
+ radeon_emit(cs, count_va >> 32);
+ radeon_emit(cs, stride); /* stride */
+ radeon_emit(cs, di_src_sel);
+ }
+}
+
+struct radv_draw_info {
+ /**
+ * Number of vertices.
+ */
+ uint32_t count;
+
+ /**
+ * Index of the first vertex.
+ */
+ int32_t vertex_offset;
+
+ /**
+ * First instance id.
+ */
+ uint32_t first_instance;
+
+ /**
+ * Number of instances.
+ */
+ uint32_t instance_count;
+
+ /**
+ * First index (indexed draws only).
+ */
+ uint32_t first_index;
+
+ /**
+ * Whether it's an indexed draw.
+ */
+ bool indexed;
+
+ /**
+ * Indirect draw parameters resource.
+ */
+ struct radv_buffer *indirect;
+ uint64_t indirect_offset;
+ uint32_t stride;
+
+ /**
+ * Draw count parameters resource.
+ */
+ struct radv_buffer *count_buffer;
+ uint64_t count_buffer_offset;
+};
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 15);
+static void
+radv_emit_draw_packets(struct radv_cmd_buffer *cmd_buffer,
+ const struct radv_draw_info *info)
+{
+ struct radv_cmd_state *state = &cmd_buffer->state;
+ struct radeon_winsys *ws = cmd_buffer->device->ws;
+ struct radeon_winsys_cs *cs = cmd_buffer->cs;
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
- radeon_set_uconfig_reg_idx(cmd_buffer->cs, R_03090C_VGT_INDEX_TYPE,
- 2, cmd_buffer->state.index_type);
- } else {
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
- radeon_emit(cmd_buffer->cs, cmd_buffer->state.index_type);
- }
+ if (info->indirect) {
+ uint64_t va = radv_buffer_get_va(info->indirect->bo);
+ uint64_t count_va = 0;
- assert(cmd_buffer->state.pipeline->graphics.vtx_base_sgpr);
- radeon_set_sh_reg_seq(cmd_buffer->cs, cmd_buffer->state.pipeline->graphics.vtx_base_sgpr,
- cmd_buffer->state.pipeline->graphics.vtx_emit_num);
- radeon_emit(cmd_buffer->cs, vertexOffset);
- radeon_emit(cmd_buffer->cs, firstInstance);
- if (cmd_buffer->state.pipeline->graphics.vtx_emit_num == 3)
- radeon_emit(cmd_buffer->cs, 0);
+ va += info->indirect->offset + info->indirect_offset;
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
- radeon_emit(cmd_buffer->cs, instanceCount);
+ radv_cs_add_buffer(ws, cs, info->indirect->bo, 8);
- index_va = cmd_buffer->state.index_va;
- index_va += firstIndex * index_size;
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_2, 4, false));
- radeon_emit(cmd_buffer->cs, cmd_buffer->state.max_index_count);
- radeon_emit(cmd_buffer->cs, index_va);
- radeon_emit(cmd_buffer->cs, (index_va >> 32UL) & 0xFF);
- radeon_emit(cmd_buffer->cs, indexCount);
- radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_DMA);
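+		/* SET_BASE with base index 1 tells the CP where to fetch the
+		 * indirect draw arguments.
+		 */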
+ radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
+ radeon_emit(cs, 1);
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
- assert(cmd_buffer->cs->cdw <= cdw_max);
- radv_cmd_buffer_trace_emit(cmd_buffer);
+ if (info->count_buffer) {
+ count_va = radv_buffer_get_va(info->count_buffer->bo);
+ count_va += info->count_buffer->offset +
+ info->count_buffer_offset;
+
+ radv_cs_add_buffer(ws, cs, info->count_buffer->bo, 8);
+ }
+
+ if (!state->subpass->view_mask) {
+ radv_cs_emit_indirect_draw_packet(cmd_buffer,
+ info->indexed,
+ info->count,
+ count_va,
+ info->stride);
+ } else {
+ unsigned i;
+ for_each_bit(i, state->subpass->view_mask) {
+ radv_emit_view_index(cmd_buffer, i);
+
+ radv_cs_emit_indirect_draw_packet(cmd_buffer,
+ info->indexed,
+ info->count,
+ count_va,
+ info->stride);
+ }
+ }
+ } else {
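+		/* Direct draws: write the base vertex, start instance and,
+		 * when the shader needs a draw id, a zero into the user SGPRs.
+		 */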
+ assert(state->pipeline->graphics.vtx_base_sgpr);
+ radeon_set_sh_reg_seq(cs, state->pipeline->graphics.vtx_base_sgpr,
+ state->pipeline->graphics.vtx_emit_num);
+ radeon_emit(cs, info->vertex_offset);
+ radeon_emit(cs, info->first_instance);
+ if (state->pipeline->graphics.vtx_emit_num == 3)
+ radeon_emit(cs, 0);
+
+ radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, state->predicating));
+ radeon_emit(cs, info->instance_count);
+
+ if (info->indexed) {
+ int index_size = state->index_type ? 4 : 2;
+ uint64_t index_va;
+
+ index_va = state->index_va;
+ index_va += info->first_index * index_size;
+
+ if (!state->subpass->view_mask) {
+ radv_cs_emit_draw_indexed_packet(cmd_buffer,
+ index_va,
+ info->count);
+ } else {
+ unsigned i;
+ for_each_bit(i, state->subpass->view_mask) {
+ radv_emit_view_index(cmd_buffer, i);
+
+ radv_cs_emit_draw_indexed_packet(cmd_buffer,
+ index_va,
+ info->count);
+ }
+ }
+ } else {
+ if (!state->subpass->view_mask) {
+ radv_cs_emit_draw_packet(cmd_buffer, info->count);
+ } else {
+ unsigned i;
+ for_each_bit(i, state->subpass->view_mask) {
+ radv_emit_view_index(cmd_buffer, i);
+
+ radv_cs_emit_draw_packet(cmd_buffer,
+ info->count);
+ }
+ }
+ }
+ }
}
static void
-radv_emit_indirect_draw(struct radv_cmd_buffer *cmd_buffer,
- VkBuffer _buffer,
- VkDeviceSize offset,
- VkBuffer _count_buffer,
- VkDeviceSize count_offset,
- uint32_t draw_count,
- uint32_t stride,
- bool indexed)
+radv_emit_all_graphics_states(struct radv_cmd_buffer *cmd_buffer,
+ const struct radv_draw_info *info)
{
- RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
- RADV_FROM_HANDLE(radv_buffer, count_buffer, _count_buffer);
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
- unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA
- : V_0287F0_DI_SRC_SEL_AUTO_INDEX;
- uint64_t indirect_va = cmd_buffer->device->ws->buffer_get_va(buffer->bo);
- indirect_va += offset + buffer->offset;
- uint64_t count_va = 0;
+ if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE)
+ radv_emit_graphics_pipeline(cmd_buffer);
- if (count_buffer) {
- count_va = cmd_buffer->device->ws->buffer_get_va(count_buffer->bo);
- count_va += count_offset + count_buffer->offset;
- }
+ if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER)
+ radv_emit_framebuffer_state(cmd_buffer);
- if (!draw_count)
- return;
+ if (info->indexed) {
+ if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_INDEX_BUFFER)
+ radv_emit_index_buffer(cmd_buffer);
+ } else {
+ /* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE,
+ * so the state must be re-emitted before the next indexed
+ * draw.
+ */
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
+ cmd_buffer->state.last_index_type = -1;
+ cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
+ }
+ }
- cmd_buffer->device->ws->cs_add_buffer(cs, buffer->bo, 8);
- bool draw_id_enable = cmd_buffer->state.pipeline->shaders[MESA_SHADER_VERTEX]->info.info.vs.needs_draw_id;
- uint32_t base_reg = cmd_buffer->state.pipeline->graphics.vtx_base_sgpr;
- assert(base_reg);
+ radv_cmd_buffer_flush_dynamic_state(cmd_buffer);
- radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
- radeon_emit(cs, 1);
- radeon_emit(cs, indirect_va);
- radeon_emit(cs, indirect_va >> 32);
-
- radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
- PKT3_DRAW_INDIRECT_MULTI,
- 8, false));
- radeon_emit(cs, 0);
- radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
- radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
- radeon_emit(cs, (((base_reg + 8) - SI_SH_REG_OFFSET) >> 2) |
- S_2C3_DRAW_INDEX_ENABLE(draw_id_enable) |
- S_2C3_COUNT_INDIRECT_ENABLE(!!count_va));
- radeon_emit(cs, draw_count); /* count */
- radeon_emit(cs, count_va); /* count_addr */
- radeon_emit(cs, count_va >> 32);
- radeon_emit(cs, stride); /* stride */
- radeon_emit(cs, di_src_sel);
- radv_cmd_buffer_trace_emit(cmd_buffer);
+ radv_emit_draw_registers(cmd_buffer, info->indexed,
+ info->instance_count > 1, info->indirect,
+ info->indirect ? 0 : info->count);
}
static void
-radv_cmd_draw_indirect_count(VkCommandBuffer commandBuffer,
- VkBuffer buffer,
- VkDeviceSize offset,
- VkBuffer countBuffer,
- VkDeviceSize countBufferOffset,
- uint32_t maxDrawCount,
- uint32_t stride)
-{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- radv_cmd_buffer_flush_state(cmd_buffer, false, false, true, 0);
+radv_draw(struct radv_cmd_buffer *cmd_buffer,
+ const struct radv_draw_info *info)
+{
+ bool pipeline_is_dirty =
+ (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) &&
+ cmd_buffer->state.pipeline &&
+ cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline;
+
+ MAYBE_UNUSED unsigned cdw_max =
+ radeon_check_space(cmd_buffer->device->ws,
+ cmd_buffer->cs, 4096);
+
+ /* Use optimal packet order based on whether we need to sync the
+ * pipeline.
+ */
+ if (cmd_buffer->state.flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
+ RADV_CMD_FLAG_FLUSH_AND_INV_DB |
+ RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
+ RADV_CMD_FLAG_CS_PARTIAL_FLUSH)) {
+ /* If we have to wait for idle, set all states first, so that
+ * all SET packets are processed in parallel with previous draw
+ * calls. Then upload descriptors, set shader pointers, and
+ * draw, and prefetch at the end. This ensures that the time
+ * the CUs are idle is very short. (there are only SET_SH
+ * packets between the wait and the draw)
+ */
+ radv_emit_all_graphics_states(cmd_buffer, info);
+ si_emit_cache_flush(cmd_buffer);
+ /* <-- CUs are idle here --> */
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
- cmd_buffer->cs, 14);
+ if (!radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty))
+ return;
- radv_emit_indirect_draw(cmd_buffer, buffer, offset,
- countBuffer, countBufferOffset, maxDrawCount, stride, false);
+ radv_emit_draw_packets(cmd_buffer, info);
+ /* <-- CUs are busy here --> */
+
+ /* Start prefetches after the draw has been started. Both will
+ * run in parallel, but starting the draw first is more
+ * important.
+ */
+ if (pipeline_is_dirty) {
+ radv_emit_prefetch(cmd_buffer,
+ cmd_buffer->state.pipeline);
+ }
+ } else {
+ /* If we don't wait for idle, start prefetches first, then set
+ * states, and draw at the end.
+ */
+ si_emit_cache_flush(cmd_buffer);
+
+ if (pipeline_is_dirty) {
+ radv_emit_prefetch(cmd_buffer,
+ cmd_buffer->state.pipeline);
+ }
+
+ if (!radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty))
+ return;
+
+ radv_emit_all_graphics_states(cmd_buffer, info);
+ radv_emit_draw_packets(cmd_buffer, info);
+ }
assert(cmd_buffer->cs->cdw <= cdw_max);
+ radv_cmd_buffer_after_draw(cmd_buffer);
}
-static void
-radv_cmd_draw_indexed_indirect_count(
+void radv_CmdDraw(
VkCommandBuffer commandBuffer,
- VkBuffer buffer,
- VkDeviceSize offset,
- VkBuffer countBuffer,
- VkDeviceSize countBufferOffset,
- uint32_t maxDrawCount,
- uint32_t stride)
+ uint32_t vertexCount,
+ uint32_t instanceCount,
+ uint32_t firstVertex,
+ uint32_t firstInstance)
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- uint64_t index_va;
- radv_cmd_buffer_flush_state(cmd_buffer, true, false, true, 0);
+ struct radv_draw_info info = {};
- index_va = cmd_buffer->state.index_va;
+ info.count = vertexCount;
+ info.instance_count = instanceCount;
+ info.first_instance = firstInstance;
+ info.vertex_offset = firstVertex;
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 21);
-
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
- radeon_emit(cmd_buffer->cs, cmd_buffer->state.index_type);
-
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_BASE, 1, 0));
- radeon_emit(cmd_buffer->cs, index_va);
- radeon_emit(cmd_buffer->cs, index_va >> 32);
+ radv_draw(cmd_buffer, &info);
+}
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
- radeon_emit(cmd_buffer->cs, cmd_buffer->state.max_index_count);
+void radv_CmdDrawIndexed(
+ VkCommandBuffer commandBuffer,
+ uint32_t indexCount,
+ uint32_t instanceCount,
+ uint32_t firstIndex,
+ int32_t vertexOffset,
+ uint32_t firstInstance)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ struct radv_draw_info info = {};
- radv_emit_indirect_draw(cmd_buffer, buffer, offset,
- countBuffer, countBufferOffset, maxDrawCount, stride, true);
+ info.indexed = true;
+ info.count = indexCount;
+ info.instance_count = instanceCount;
+ info.first_index = firstIndex;
+ info.vertex_offset = vertexOffset;
+ info.first_instance = firstInstance;
- assert(cmd_buffer->cs->cdw <= cdw_max);
+ radv_draw(cmd_buffer, &info);
}
void radv_CmdDrawIndirect(
VkCommandBuffer commandBuffer,
- VkBuffer buffer,
+ VkBuffer _buffer,
VkDeviceSize offset,
uint32_t drawCount,
uint32_t stride)
{
- radv_cmd_draw_indirect_count(commandBuffer, buffer, offset,
- VK_NULL_HANDLE, 0, drawCount, stride);
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
+ struct radv_draw_info info = {};
+
+ info.count = drawCount;
+ info.indirect = buffer;
+ info.indirect_offset = offset;
+ info.stride = stride;
+
+ radv_draw(cmd_buffer, &info);
}
void radv_CmdDrawIndexedIndirect(
VkCommandBuffer commandBuffer,
- VkBuffer buffer,
+ VkBuffer _buffer,
VkDeviceSize offset,
uint32_t drawCount,
uint32_t stride)
{
- radv_cmd_draw_indexed_indirect_count(commandBuffer, buffer, offset,
- VK_NULL_HANDLE, 0, drawCount, stride);
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
+ struct radv_draw_info info = {};
+
+ info.indexed = true;
+ info.count = drawCount;
+ info.indirect = buffer;
+ info.indirect_offset = offset;
+ info.stride = stride;
+
+ radv_draw(cmd_buffer, &info);
}
void radv_CmdDrawIndirectCountAMD(
VkCommandBuffer commandBuffer,
- VkBuffer buffer,
+ VkBuffer _buffer,
VkDeviceSize offset,
- VkBuffer countBuffer,
+ VkBuffer _countBuffer,
VkDeviceSize countBufferOffset,
uint32_t maxDrawCount,
uint32_t stride)
{
- radv_cmd_draw_indirect_count(commandBuffer, buffer, offset,
- countBuffer, countBufferOffset,
- maxDrawCount, stride);
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
+ RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
+ struct radv_draw_info info = {};
+
+ info.count = maxDrawCount;
+ info.indirect = buffer;
+ info.indirect_offset = offset;
+ info.count_buffer = count_buffer;
+ info.count_buffer_offset = countBufferOffset;
+ info.stride = stride;
+
+ radv_draw(cmd_buffer, &info);
}
void radv_CmdDrawIndexedIndirectCountAMD(
VkCommandBuffer commandBuffer,
- VkBuffer buffer,
+ VkBuffer _buffer,
VkDeviceSize offset,
- VkBuffer countBuffer,
+ VkBuffer _countBuffer,
VkDeviceSize countBufferOffset,
uint32_t maxDrawCount,
uint32_t stride)
{
- radv_cmd_draw_indexed_indirect_count(commandBuffer, buffer, offset,
- countBuffer, countBufferOffset,
- maxDrawCount, stride);
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
+ RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
+ struct radv_draw_info info = {};
+
+ info.indexed = true;
+ info.count = maxDrawCount;
+ info.indirect = buffer;
+ info.indirect_offset = offset;
+ info.count_buffer = count_buffer;
+ info.count_buffer_offset = countBufferOffset;
+ info.stride = stride;
+
+ radv_draw(cmd_buffer, &info);
+}
+
+struct radv_dispatch_info {
+ /**
+	 * Dimensions of the dispatch grid, in blocks (workgroups).
+ */
+ uint32_t blocks[3];
+
+ /**
+ * Whether it's an unaligned compute dispatch.
+ */
+ bool unaligned;
+
+ /**
+ * Indirect compute parameters resource.
+ */
+ struct radv_buffer *indirect;
+ uint64_t indirect_offset;
+};
+
+static void
+radv_emit_dispatch_packets(struct radv_cmd_buffer *cmd_buffer,
+ const struct radv_dispatch_info *info)
+{
+ struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+ struct radv_shader_variant *compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
+ unsigned dispatch_initiator = cmd_buffer->device->dispatch_initiator;
+ struct radeon_winsys *ws = cmd_buffer->device->ws;
+ struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct ac_userdata_info *loc;
+
+ loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_COMPUTE,
+ AC_UD_CS_GRID_SIZE);
+
+ MAYBE_UNUSED unsigned cdw_max = radeon_check_space(ws, cs, 25);
+
+ if (info->indirect) {
+ uint64_t va = radv_buffer_get_va(info->indirect->bo);
+
+ va += info->indirect->offset + info->indirect_offset;
+
+ radv_cs_add_buffer(ws, cs, info->indirect->bo, 8);
+
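+		/* Copy the grid size from the indirect buffer into the
+		 * grid-size user SGPRs so the shader can read the dispatch
+		 * dimensions.
+		 */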
+ if (loc->sgpr_idx != -1) {
+ for (unsigned i = 0; i < 3; ++i) {
+ radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
+ radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
+ COPY_DATA_DST_SEL(COPY_DATA_REG));
+ radeon_emit(cs, (va + 4 * i));
+ radeon_emit(cs, (va + 4 * i) >> 32);
+ radeon_emit(cs, ((R_00B900_COMPUTE_USER_DATA_0
+ + loc->sgpr_idx * 4) >> 2) + i);
+ radeon_emit(cs, 0);
+ }
+ }
+
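+		/* The MEC firmware dispatches straight from the given address,
+		 * while the GFX ring needs a SET_BASE followed by a zero offset.
+		 */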
+ if (radv_cmd_buffer_uses_mec(cmd_buffer)) {
+ radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 2, 0) |
+ PKT3_SHADER_TYPE_S(1));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, dispatch_initiator);
+ } else {
+ radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) |
+ PKT3_SHADER_TYPE_S(1));
+ radeon_emit(cs, 1);
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+
+ radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, 0) |
+ PKT3_SHADER_TYPE_S(1));
+ radeon_emit(cs, 0);
+ radeon_emit(cs, dispatch_initiator);
+ }
+ } else {
+ unsigned blocks[3] = { info->blocks[0], info->blocks[1], info->blocks[2] };
+
+ if (info->unaligned) {
+ unsigned *cs_block_size = compute_shader->info.cs.block_size;
+ unsigned remainder[3];
+
+ /* If aligned, these should be an entire block size,
+ * not 0.
+ */
+ remainder[0] = blocks[0] + cs_block_size[0] -
+ align_u32_npot(blocks[0], cs_block_size[0]);
+ remainder[1] = blocks[1] + cs_block_size[1] -
+ align_u32_npot(blocks[1], cs_block_size[1]);
+ remainder[2] = blocks[2] + cs_block_size[2] -
+ align_u32_npot(blocks[2], cs_block_size[2]);
+
+ blocks[0] = round_up_u32(blocks[0], cs_block_size[0]);
+ blocks[1] = round_up_u32(blocks[1], cs_block_size[1]);
+ blocks[2] = round_up_u32(blocks[2], cs_block_size[2]);
+
+ radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
+ radeon_emit(cs,
+ S_00B81C_NUM_THREAD_FULL(cs_block_size[0]) |
+ S_00B81C_NUM_THREAD_PARTIAL(remainder[0]));
+ radeon_emit(cs,
+ S_00B81C_NUM_THREAD_FULL(cs_block_size[1]) |
+ S_00B81C_NUM_THREAD_PARTIAL(remainder[1]));
+ radeon_emit(cs,
+ S_00B81C_NUM_THREAD_FULL(cs_block_size[2]) |
+ S_00B81C_NUM_THREAD_PARTIAL(remainder[2]));
+
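+			/* PARTIAL_TG_EN makes the trailing threadgroup in each
+			 * dimension run with the partial thread counts
+			 * programmed above.
+			 */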
+ dispatch_initiator |= S_00B800_PARTIAL_TG_EN(1);
+ }
+
+ if (loc->sgpr_idx != -1) {
+ assert(!loc->indirect);
+ assert(loc->num_sgprs == 3);
+
+ radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
+ loc->sgpr_idx * 4, 3);
+ radeon_emit(cs, blocks[0]);
+ radeon_emit(cs, blocks[1]);
+ radeon_emit(cs, blocks[2]);
+ }
+
+ radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, 0) |
+ PKT3_SHADER_TYPE_S(1));
+ radeon_emit(cs, blocks[0]);
+ radeon_emit(cs, blocks[1]);
+ radeon_emit(cs, blocks[2]);
+ radeon_emit(cs, dispatch_initiator);
+ }
+
+ assert(cmd_buffer->cs->cdw <= cdw_max);
}
static void
-radv_flush_compute_state(struct radv_cmd_buffer *cmd_buffer)
+radv_upload_compute_shader_descriptors(struct radv_cmd_buffer *cmd_buffer)
{
- radv_emit_compute_pipeline(cmd_buffer);
radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_COMPUTE_BIT);
radv_flush_constants(cmd_buffer, cmd_buffer->state.compute_pipeline,
VK_SHADER_STAGE_COMPUTE_BIT);
- si_emit_cache_flush(cmd_buffer);
+}
+
+static void
+radv_dispatch(struct radv_cmd_buffer *cmd_buffer,
+ const struct radv_dispatch_info *info)
+{
+ struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+ bool pipeline_is_dirty = pipeline &&
+ pipeline != cmd_buffer->state.emitted_compute_pipeline;
+
+ if (cmd_buffer->state.flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
+ RADV_CMD_FLAG_FLUSH_AND_INV_DB |
+ RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
+ RADV_CMD_FLAG_CS_PARTIAL_FLUSH)) {
+ /* If we have to wait for idle, set all states first, so that
+ * all SET packets are processed in parallel with previous draw
+ * calls. Then upload descriptors, set shader pointers, and
+ * dispatch, and prefetch at the end. This ensures that the
+ * time the CUs are idle is very short. (there are only SET_SH
+ * packets between the wait and the draw)
+ */
+ radv_emit_compute_pipeline(cmd_buffer);
+ si_emit_cache_flush(cmd_buffer);
+ /* <-- CUs are idle here --> */
+
+ radv_upload_compute_shader_descriptors(cmd_buffer);
+
+ radv_emit_dispatch_packets(cmd_buffer, info);
+ /* <-- CUs are busy here --> */
+
+ /* Start prefetches after the dispatch has been started. Both
+ * will run in parallel, but starting the dispatch first is
+ * more important.
+ */
+ if (pipeline_is_dirty) {
+ radv_emit_shader_prefetch(cmd_buffer,
+ pipeline->shaders[MESA_SHADER_COMPUTE]);
+ }
+ } else {
+ /* If we don't wait for idle, start prefetches first, then set
+ * states, and dispatch at the end.
+ */
+ si_emit_cache_flush(cmd_buffer);
+
+ if (pipeline_is_dirty) {
+ radv_emit_shader_prefetch(cmd_buffer,
+ pipeline->shaders[MESA_SHADER_COMPUTE]);
+ }
+
+ radv_upload_compute_shader_descriptors(cmd_buffer);
+
+ radv_emit_compute_pipeline(cmd_buffer);
+ radv_emit_dispatch_packets(cmd_buffer, info);
+ }
+
+ radv_cmd_buffer_after_draw(cmd_buffer);
}
void radv_CmdDispatch(
uint32_t z)
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ struct radv_dispatch_info info = {};
- radv_flush_compute_state(cmd_buffer);
-
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 10);
+ info.blocks[0] = x;
+ info.blocks[1] = y;
+ info.blocks[2] = z;
- struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.compute_pipeline,
- MESA_SHADER_COMPUTE, AC_UD_CS_GRID_SIZE);
- if (loc->sgpr_idx != -1) {
- assert(!loc->indirect);
- uint8_t grid_used = cmd_buffer->state.compute_pipeline->shaders[MESA_SHADER_COMPUTE]->info.info.cs.grid_components_used;
- assert(loc->num_sgprs == grid_used);
- radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B900_COMPUTE_USER_DATA_0 + loc->sgpr_idx * 4, grid_used);
- radeon_emit(cmd_buffer->cs, x);
- if (grid_used > 1)
- radeon_emit(cmd_buffer->cs, y);
- if (grid_used > 2)
- radeon_emit(cmd_buffer->cs, z);
- }
-
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_DISPATCH_DIRECT, 3, 0) |
- PKT3_SHADER_TYPE_S(1));
- radeon_emit(cmd_buffer->cs, x);
- radeon_emit(cmd_buffer->cs, y);
- radeon_emit(cmd_buffer->cs, z);
- radeon_emit(cmd_buffer->cs, 1);
-
- assert(cmd_buffer->cs->cdw <= cdw_max);
- radv_cmd_buffer_trace_emit(cmd_buffer);
+ radv_dispatch(cmd_buffer, &info);
}
void radv_CmdDispatchIndirect(
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
- uint64_t va = cmd_buffer->device->ws->buffer_get_va(buffer->bo);
- va += buffer->offset + offset;
-
- cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, buffer->bo, 8);
+ struct radv_dispatch_info info = {};
- radv_flush_compute_state(cmd_buffer);
+ info.indirect = buffer;
+ info.indirect_offset = offset;
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 25);
- struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.compute_pipeline,
- MESA_SHADER_COMPUTE, AC_UD_CS_GRID_SIZE);
- if (loc->sgpr_idx != -1) {
- uint8_t grid_used = cmd_buffer->state.compute_pipeline->shaders[MESA_SHADER_COMPUTE]->info.info.cs.grid_components_used;
- for (unsigned i = 0; i < grid_used; ++i) {
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, 0));
- radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
- COPY_DATA_DST_SEL(COPY_DATA_REG));
- radeon_emit(cmd_buffer->cs, (va + 4 * i));
- radeon_emit(cmd_buffer->cs, (va + 4 * i) >> 32);
- radeon_emit(cmd_buffer->cs, ((R_00B900_COMPUTE_USER_DATA_0 + loc->sgpr_idx * 4) >> 2) + i);
- radeon_emit(cmd_buffer->cs, 0);
- }
- }
-
- if (radv_cmd_buffer_uses_mec(cmd_buffer)) {
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_DISPATCH_INDIRECT, 2, 0) |
- PKT3_SHADER_TYPE_S(1));
- radeon_emit(cmd_buffer->cs, va);
- radeon_emit(cmd_buffer->cs, va >> 32);
- radeon_emit(cmd_buffer->cs, 1);
- } else {
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_SET_BASE, 2, 0) |
- PKT3_SHADER_TYPE_S(1));
- radeon_emit(cmd_buffer->cs, 1);
- radeon_emit(cmd_buffer->cs, va);
- radeon_emit(cmd_buffer->cs, va >> 32);
-
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, 0) |
- PKT3_SHADER_TYPE_S(1));
- radeon_emit(cmd_buffer->cs, 0);
- radeon_emit(cmd_buffer->cs, 1);
- }
-
- assert(cmd_buffer->cs->cdw <= cdw_max);
- radv_cmd_buffer_trace_emit(cmd_buffer);
+ radv_dispatch(cmd_buffer, &info);
}
void radv_unaligned_dispatch(
uint32_t y,
uint32_t z)
{
- struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
- struct radv_shader_variant *compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
- uint32_t blocks[3], remainder[3];
-
- blocks[0] = round_up_u32(x, compute_shader->info.cs.block_size[0]);
- blocks[1] = round_up_u32(y, compute_shader->info.cs.block_size[1]);
- blocks[2] = round_up_u32(z, compute_shader->info.cs.block_size[2]);
-
- /* If aligned, these should be an entire block size, not 0 */
- remainder[0] = x + compute_shader->info.cs.block_size[0] - align_u32_npot(x, compute_shader->info.cs.block_size[0]);
- remainder[1] = y + compute_shader->info.cs.block_size[1] - align_u32_npot(y, compute_shader->info.cs.block_size[1]);
- remainder[2] = z + compute_shader->info.cs.block_size[2] - align_u32_npot(z, compute_shader->info.cs.block_size[2]);
-
- radv_flush_compute_state(cmd_buffer);
-
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 15);
-
- radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
- radeon_emit(cmd_buffer->cs,
- S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[0]) |
- S_00B81C_NUM_THREAD_PARTIAL(remainder[0]));
- radeon_emit(cmd_buffer->cs,
- S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[1]) |
- S_00B81C_NUM_THREAD_PARTIAL(remainder[1]));
- radeon_emit(cmd_buffer->cs,
- S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[2]) |
- S_00B81C_NUM_THREAD_PARTIAL(remainder[2]));
+ struct radv_dispatch_info info = {};
- struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.compute_pipeline,
- MESA_SHADER_COMPUTE, AC_UD_CS_GRID_SIZE);
- if (loc->sgpr_idx != -1) {
- uint8_t grid_used = cmd_buffer->state.compute_pipeline->shaders[MESA_SHADER_COMPUTE]->info.info.cs.grid_components_used;
- radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B900_COMPUTE_USER_DATA_0 + loc->sgpr_idx * 4, grid_used);
- radeon_emit(cmd_buffer->cs, blocks[0]);
- if (grid_used > 1)
- radeon_emit(cmd_buffer->cs, blocks[1]);
- if (grid_used > 2)
- radeon_emit(cmd_buffer->cs, blocks[2]);
- }
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_DISPATCH_DIRECT, 3, 0) |
- PKT3_SHADER_TYPE_S(1));
- radeon_emit(cmd_buffer->cs, blocks[0]);
- radeon_emit(cmd_buffer->cs, blocks[1]);
- radeon_emit(cmd_buffer->cs, blocks[2]);
- radeon_emit(cmd_buffer->cs, S_00B800_COMPUTE_SHADER_EN(1) |
- S_00B800_PARTIAL_TG_EN(1));
+ info.blocks[0] = x;
+ info.blocks[1] = y;
+ info.blocks[2] = z;
+ info.unaligned = 1;
- assert(cmd_buffer->cs->cdw <= cdw_max);
- radv_cmd_buffer_trace_emit(cmd_buffer);
+ radv_dispatch(cmd_buffer, &info);
}
void radv_CmdEndRenderPass(
uint64_t size = image->surface.htile_slice_size * layer_count;
uint64_t offset = image->offset + image->htile_offset +
image->surface.htile_slice_size * range->baseArrayLayer;
+ struct radv_cmd_state *state = &cmd_buffer->state;
- cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
- RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
+ state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
+ RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
- radv_fill_buffer(cmd_buffer, image->bo, offset, size, clear_word);
+ state->flush_bits |= radv_fill_buffer(cmd_buffer, image->bo, offset,
+ size, clear_word);
- cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META |
- RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
- RADV_CMD_FLAG_INV_VMEM_L1 |
- RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
+ state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
}
static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffer,
void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer,
struct radv_image *image, uint32_t value)
{
- cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
- RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
+ struct radv_cmd_state *state = &cmd_buffer->state;
- radv_fill_buffer(cmd_buffer, image->bo, image->offset + image->cmask.offset,
- image->cmask.size, value);
+ state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
+ RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
- cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
- RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
- RADV_CMD_FLAG_INV_VMEM_L1 |
- RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
+ state->flush_bits |= radv_fill_buffer(cmd_buffer, image->bo,
+ image->offset + image->cmask.offset,
+ image->cmask.size, value);
+
+ state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
}
static void radv_handle_cmask_image_transition(struct radv_cmd_buffer *cmd_buffer,
VkImageLayout dst_layout,
unsigned src_queue_mask,
unsigned dst_queue_mask,
- const VkImageSubresourceRange *range,
- VkImageAspectFlags pending_clears)
+ const VkImageSubresourceRange *range)
{
if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
if (image->fmask.size)
void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer,
struct radv_image *image, uint32_t value)
{
+ struct radv_cmd_state *state = &cmd_buffer->state;
- cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
- RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
+ state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
+ RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
- radv_fill_buffer(cmd_buffer, image->bo, image->offset + image->dcc_offset,
- image->surface.dcc_size, value);
+ state->flush_bits |= radv_fill_buffer(cmd_buffer, image->bo,
+ image->offset + image->dcc_offset,
+ image->surface.dcc_size, value);
- cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
- RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
- RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
- RADV_CMD_FLAG_INV_VMEM_L1 |
- RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
+ state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
+ RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
}
static void radv_handle_dcc_image_transition(struct radv_cmd_buffer *cmd_buffer,
VkImageLayout dst_layout,
unsigned src_queue_mask,
unsigned dst_queue_mask,
- const VkImageSubresourceRange *range,
- VkImageAspectFlags pending_clears)
+ const VkImageSubresourceRange *range)
{
if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
radv_initialize_dcc(cmd_buffer, image, 0x20202020u);
dst_queue_mask, range,
pending_clears);
- if (image->cmask.size)
+ if (image->cmask.size || image->fmask.size)
radv_handle_cmask_image_transition(cmd_buffer, image, src_layout,
dst_layout, src_queue_mask,
- dst_queue_mask, range,
- pending_clears);
+ dst_queue_mask, range);
if (image->surface.dcc_size)
radv_handle_dcc_image_transition(cmd_buffer, image, src_layout,
dst_layout, src_queue_mask,
- dst_queue_mask, range,
- pending_clears);
+ dst_queue_mask, range);
}
void radv_CmdPipelineBarrier(
unsigned value)
{
struct radeon_winsys_cs *cs = cmd_buffer->cs;
- uint64_t va = cmd_buffer->device->ws->buffer_get_va(event->bo);
+ uint64_t va = radv_buffer_get_va(event->bo);
- cmd_buffer->device->ws->cs_add_buffer(cs, event->bo, 8);
+ radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo, 8);
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 18);
cmd_buffer->state.predicating,
cmd_buffer->device->physical_device->rad_info.chip_class,
false,
- EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0,
+ V_028A90_BOTTOM_OF_PIPE_TS, 0,
1, va, 2, value);
assert(cmd_buffer->cs->cdw <= cdw_max);
for (unsigned i = 0; i < eventCount; ++i) {
RADV_FROM_HANDLE(radv_event, event, pEvents[i]);
- uint64_t va = cmd_buffer->device->ws->buffer_get_va(event->bo);
+ uint64_t va = radv_buffer_get_va(event->bo);
- cmd_buffer->device->ws->cs_add_buffer(cs, event->bo, 8);
+ radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo, 8);
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);