#include "ac_debug.h"
+enum {
+ RADV_PREFETCH_VBO_DESCRIPTORS = (1 << 0),
+ RADV_PREFETCH_VS = (1 << 1),
+ RADV_PREFETCH_TCS = (1 << 2),
+ RADV_PREFETCH_TES = (1 << 3),
+ RADV_PREFETCH_GS = (1 << 4),
+ RADV_PREFETCH_PS = (1 << 5),
+ RADV_PREFETCH_SHADERS = (RADV_PREFETCH_VS |
+ RADV_PREFETCH_TCS |
+ RADV_PREFETCH_TES |
+ RADV_PREFETCH_GS |
+ RADV_PREFETCH_PS)
+};
+
static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
struct radv_image *image,
VkImageLayout src_layout,
if (cmd_buffer->upload.upload_bo)
cmd_buffer->device->ws->buffer_destroy(cmd_buffer->upload.upload_bo);
cmd_buffer->device->ws->cs_destroy(cmd_buffer->cs);
- free(cmd_buffer->push_descriptors.set.mapped_ptr);
+
+ for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
+ free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);
+
vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
cmd_buffer->ring_offsets_idx = -1;
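+ /* Reset the per-bind-point descriptor state. */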
+ for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
+ cmd_buffer->descriptors[i].dirty = 0;
+ cmd_buffer->descriptors[i].valid = 0;
+ cmd_buffer->descriptors[i].push_dirty = false;
+ }
+
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
void *fence_ptr;
radv_cmd_buffer_upload_alloc(cmd_buffer, 8, 0,
}
/* Force wait for graphics or compute engines to be idle. */
- si_cs_emit_cache_flush(cmd_buffer->cs, false,
+ si_cs_emit_cache_flush(cmd_buffer->cs,
cmd_buffer->device->physical_device->rad_info.chip_class,
ptr, va,
radv_cmd_buffer_uses_mec(cmd_buffer),
}
void radv_set_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
+ VkPipelineBindPoint bind_point,
struct radv_descriptor_set *set,
unsigned idx)
{
- cmd_buffer->descriptors[idx] = set;
+ struct radv_descriptor_state *descriptors_state =
+ radv_get_descriptors_state(cmd_buffer, bind_point);
+
+ descriptors_state->sets[idx] = set;
if (set)
- cmd_buffer->state.valid_descriptors |= (1u << idx);
+ descriptors_state->valid |= (1u << idx);
else
- cmd_buffer->state.valid_descriptors &= ~(1u << idx);
- cmd_buffer->state.descriptors_dirty |= (1u << idx);
-
+ descriptors_state->valid &= ~(1u << idx);
+ descriptors_state->dirty |= (1u << idx);
}
static void
-radv_save_descriptors(struct radv_cmd_buffer *cmd_buffer)
+radv_save_descriptors(struct radv_cmd_buffer *cmd_buffer,
+ VkPipelineBindPoint bind_point)
{
+ struct radv_descriptor_state *descriptors_state =
+ radv_get_descriptors_state(cmd_buffer, bind_point);
struct radv_device *device = cmd_buffer->device;
struct radeon_winsys_cs *cs = cmd_buffer->cs;
uint32_t data[MAX_SETS * 2] = {};
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(device->ws,
cmd_buffer->cs, 4 + MAX_SETS * 2);
- for_each_bit(i, cmd_buffer->state.valid_descriptors) {
- struct radv_descriptor_set *set = cmd_buffer->descriptors[i];
+ for_each_bit(i, descriptors_state->valid) {
+ struct radv_descriptor_set *set = descriptors_state->sets[i];
data[i * 2] = (uintptr_t)set;
data[i * 2 + 1] = (uintptr_t)set >> 32;
}
radv_emit_write_data_packet(cs, va, MAX_SETS * 2, data);
}
-struct ac_userdata_info *
+struct radv_userdata_info *
radv_lookup_user_sgpr(struct radv_pipeline *pipeline,
gl_shader_stage stage,
int idx)
gl_shader_stage stage,
int idx, uint64_t va)
{
- struct ac_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx);
+ struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx);
uint32_t base_reg = pipeline->user_data_0[stage];
if (loc->sgpr_idx == -1)
return;
}
}
+static void
+radv_emit_shader_prefetch(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_shader_variant *shader)
+{
+ uint64_t va;
+ if (!shader)
+ return;
-static inline void
-radv_emit_prefetch_TC_L2_async(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
- unsigned size)
-{
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK)
- si_cp_dma_prefetch(cmd_buffer, va, size);
+ va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
+
+ si_cp_dma_prefetch(cmd_buffer, va, shader->code_size);
}
static void
-radv_emit_VBO_descriptors_prefetch(struct radv_cmd_buffer *cmd_buffer)
+radv_emit_prefetch_L2(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_pipeline *pipeline,
+ bool vertex_stage_only)
{
- if (cmd_buffer->state.vb_prefetch_dirty) {
- radv_emit_prefetch_TC_L2_async(cmd_buffer,
- cmd_buffer->state.vb_va,
- cmd_buffer->state.vb_size);
- cmd_buffer->state.vb_prefetch_dirty = false;
+ struct radv_cmd_state *state = &cmd_buffer->state;
+ uint32_t mask = state->prefetch_L2_mask;
+
+ if (vertex_stage_only) {
+ /* Fast prefetch path for starting draws as soon as possible.
+ */
+ mask = state->prefetch_L2_mask & (RADV_PREFETCH_VS |
+ RADV_PREFETCH_VBO_DESCRIPTORS);
}
+
+ if (mask & RADV_PREFETCH_VS)
+ radv_emit_shader_prefetch(cmd_buffer,
+ pipeline->shaders[MESA_SHADER_VERTEX]);
+
+ if (mask & RADV_PREFETCH_VBO_DESCRIPTORS)
+ si_cp_dma_prefetch(cmd_buffer, state->vb_va, state->vb_size);
+
+ if (mask & RADV_PREFETCH_TCS)
+ radv_emit_shader_prefetch(cmd_buffer,
+ pipeline->shaders[MESA_SHADER_TESS_CTRL]);
+
+ if (mask & RADV_PREFETCH_TES)
+ radv_emit_shader_prefetch(cmd_buffer,
+ pipeline->shaders[MESA_SHADER_TESS_EVAL]);
+
+ if (mask & RADV_PREFETCH_GS) {
+ radv_emit_shader_prefetch(cmd_buffer,
+ pipeline->shaders[MESA_SHADER_GEOMETRY]);
+ radv_emit_shader_prefetch(cmd_buffer, pipeline->gs_copy_shader);
+ }
+
+ if (mask & RADV_PREFETCH_PS)
+ radv_emit_shader_prefetch(cmd_buffer,
+ pipeline->shaders[MESA_SHADER_FRAGMENT]);
+
+ state->prefetch_L2_mask &= ~mask;
}
static void
-radv_emit_shader_prefetch(struct radv_cmd_buffer *cmd_buffer,
- struct radv_shader_variant *shader)
+radv_emit_rbplus_state(struct radv_cmd_buffer *cmd_buffer)
{
- struct radeon_winsys *ws = cmd_buffer->device->ws;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
- uint64_t va;
-
- if (!shader)
+ if (!cmd_buffer->device->physical_device->rbplus_allowed)
return;
- va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
+ struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
+ const struct radv_subpass *subpass = cmd_buffer->state.subpass;
- radv_cs_add_buffer(ws, cs, shader->bo, 8);
- radv_emit_prefetch_TC_L2_async(cmd_buffer, va, shader->code_size);
-}
+ unsigned sx_ps_downconvert = 0;
+ unsigned sx_blend_opt_epsilon = 0;
+ unsigned sx_blend_opt_control = 0;
-static void
-radv_emit_prefetch(struct radv_cmd_buffer *cmd_buffer,
- struct radv_pipeline *pipeline)
-{
- radv_emit_shader_prefetch(cmd_buffer,
- pipeline->shaders[MESA_SHADER_VERTEX]);
- radv_emit_VBO_descriptors_prefetch(cmd_buffer);
- radv_emit_shader_prefetch(cmd_buffer,
- pipeline->shaders[MESA_SHADER_TESS_CTRL]);
- radv_emit_shader_prefetch(cmd_buffer,
- pipeline->shaders[MESA_SHADER_TESS_EVAL]);
- radv_emit_shader_prefetch(cmd_buffer,
- pipeline->shaders[MESA_SHADER_GEOMETRY]);
- radv_emit_shader_prefetch(cmd_buffer, pipeline->gs_copy_shader);
- radv_emit_shader_prefetch(cmd_buffer,
- pipeline->shaders[MESA_SHADER_FRAGMENT]);
+ for (unsigned i = 0; i < subpass->color_count; ++i) {
+ if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED)
+ continue;
+
+ int idx = subpass->color_attachments[i].attachment;
+ struct radv_color_buffer_info *cb = &framebuffer->attachments[idx].cb;
+
+ unsigned format = G_028C70_FORMAT(cb->cb_color_info);
+ unsigned swap = G_028C70_COMP_SWAP(cb->cb_color_info);
+ uint32_t spi_format = (pipeline->graphics.col_format >> (i * 4)) & 0xf;
+ uint32_t colormask = (pipeline->graphics.cb_target_mask >> (i * 4)) & 0xf;
+
+ bool has_alpha, has_rgb;
+
+ /* Set if RGB and A are present. */
+ has_alpha = !G_028C74_FORCE_DST_ALPHA_1(cb->cb_color_attrib);
+
+ if (format == V_028C70_COLOR_8 ||
+ format == V_028C70_COLOR_16 ||
+ format == V_028C70_COLOR_32)
+ has_rgb = !has_alpha;
+ else
+ has_rgb = true;
+
+ /* Check the colormask and export format. */
+ if (!(colormask & 0x7))
+ has_rgb = false;
+ if (!(colormask & 0x8))
+ has_alpha = false;
+
+ if (spi_format == V_028714_SPI_SHADER_ZERO) {
+ has_rgb = false;
+ has_alpha = false;
+ }
+
+ /* Disable value checking for disabled channels. */
+ if (!has_rgb)
+ sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
+ if (!has_alpha)
+ sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);
+
+ /* Enable down-conversion for 32bpp and smaller formats. */
+ switch (format) {
+ case V_028C70_COLOR_8:
+ case V_028C70_COLOR_8_8:
+ case V_028C70_COLOR_8_8_8_8:
+ /* For 1 and 2-channel formats, use the superset thereof. */
+ if (spi_format == V_028714_SPI_SHADER_FP16_ABGR ||
+ spi_format == V_028714_SPI_SHADER_UINT16_ABGR ||
+ spi_format == V_028714_SPI_SHADER_SINT16_ABGR) {
+ sx_ps_downconvert |= V_028754_SX_RT_EXPORT_8_8_8_8 << (i * 4);
+ sx_blend_opt_epsilon |= V_028758_8BIT_FORMAT << (i * 4);
+ }
+ break;
+
+ case V_028C70_COLOR_5_6_5:
+ if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
+ sx_ps_downconvert |= V_028754_SX_RT_EXPORT_5_6_5 << (i * 4);
+ sx_blend_opt_epsilon |= V_028758_6BIT_FORMAT << (i * 4);
+ }
+ break;
+
+ case V_028C70_COLOR_1_5_5_5:
+ if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
+ sx_ps_downconvert |= V_028754_SX_RT_EXPORT_1_5_5_5 << (i * 4);
+ sx_blend_opt_epsilon |= V_028758_5BIT_FORMAT << (i * 4);
+ }
+ break;
+
+ case V_028C70_COLOR_4_4_4_4:
+ if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
+ sx_ps_downconvert |= V_028754_SX_RT_EXPORT_4_4_4_4 << (i * 4);
+ sx_blend_opt_epsilon |= V_028758_4BIT_FORMAT << (i * 4);
+ }
+ break;
+
+ case V_028C70_COLOR_32:
+ if (swap == V_028C70_SWAP_STD &&
+ spi_format == V_028714_SPI_SHADER_32_R)
+ sx_ps_downconvert |= V_028754_SX_RT_EXPORT_32_R << (i * 4);
+ else if (swap == V_028C70_SWAP_ALT_REV &&
+ spi_format == V_028714_SPI_SHADER_32_AR)
+ sx_ps_downconvert |= V_028754_SX_RT_EXPORT_32_A << (i * 4);
+ break;
+
+ case V_028C70_COLOR_16:
+ case V_028C70_COLOR_16_16:
+ /* For 1-channel formats, use the superset thereof. */
+ if (spi_format == V_028714_SPI_SHADER_UNORM16_ABGR ||
+ spi_format == V_028714_SPI_SHADER_SNORM16_ABGR ||
+ spi_format == V_028714_SPI_SHADER_UINT16_ABGR ||
+ spi_format == V_028714_SPI_SHADER_SINT16_ABGR) {
+ if (swap == V_028C70_SWAP_STD ||
+ swap == V_028C70_SWAP_STD_REV)
+ sx_ps_downconvert |= V_028754_SX_RT_EXPORT_16_16_GR << (i * 4);
+ else
+ sx_ps_downconvert |= V_028754_SX_RT_EXPORT_16_16_AR << (i * 4);
+ }
+ break;
+
+ case V_028C70_COLOR_10_11_11:
+ if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
+ sx_ps_downconvert |= V_028754_SX_RT_EXPORT_10_11_11 << (i * 4);
+ sx_blend_opt_epsilon |= V_028758_11BIT_FORMAT << (i * 4);
+ }
+ break;
+
+ case V_028C70_COLOR_2_10_10_10:
+ if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
+ sx_ps_downconvert |= V_028754_SX_RT_EXPORT_2_10_10_10 << (i * 4);
+ sx_blend_opt_epsilon |= V_028758_10BIT_FORMAT << (i * 4);
+ }
+ break;
+ }
+ }
+
+ radeon_set_context_reg_seq(cmd_buffer->cs, R_028754_SX_PS_DOWNCONVERT, 3);
+ radeon_emit(cmd_buffer->cs, sx_ps_downconvert);
+ radeon_emit(cmd_buffer->cs, sx_blend_opt_epsilon);
+ radeon_emit(cmd_buffer->cs, sx_blend_opt_control);
}
static void
radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw);
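+ /* Add the shader BOs to the CS buffer list so they remain referenced by this command stream. */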
+ for (unsigned i = 0; i < MESA_SHADER_COMPUTE; i++) {
+ if (!pipeline->shaders[i])
+ continue;
+
+ radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
+ pipeline->shaders[i]->bo, 8);
+ }
+
+ if (radv_pipeline_has_gs(pipeline))
+ radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
+ pipeline->gs_copy_shader->bo, 8);
+
if (unlikely(cmd_buffer->device->trace_bo))
radv_save_pipeline(cmd_buffer, pipeline, RING_GFX);
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
radeon_emit(cmd_buffer->cs, cb->cb_color_base);
- radeon_emit(cmd_buffer->cs, cb->cb_color_base >> 32);
+ radeon_emit(cmd_buffer->cs, S_028C64_BASE_256B(cb->cb_color_base >> 32));
radeon_emit(cmd_buffer->cs, cb->cb_color_attrib2);
radeon_emit(cmd_buffer->cs, cb->cb_color_view);
radeon_emit(cmd_buffer->cs, cb_color_info);
radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
- radeon_emit(cmd_buffer->cs, cb->cb_color_cmask >> 32);
+ radeon_emit(cmd_buffer->cs, S_028C80_BASE_256B(cb->cb_color_cmask >> 32));
radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
- radeon_emit(cmd_buffer->cs, cb->cb_color_fmask >> 32);
+ radeon_emit(cmd_buffer->cs, S_028C88_BASE_256B(cb->cb_color_fmask >> 32));
radeon_set_context_reg_seq(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, 2);
radeon_emit(cmd_buffer->cs, cb->cb_dcc_base);
- radeon_emit(cmd_buffer->cs, cb->cb_dcc_base >> 32);
+ radeon_emit(cmd_buffer->cs, S_028C98_BASE_256B(cb->cb_dcc_base >> 32));
radeon_set_context_reg(cmd_buffer->cs, R_0287A0_CB_MRT0_EPITCH + index * 4,
S_0287A0_EPITCH(att->attachment->image->surface.u.gfx9.surf.epitch));
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
radeon_set_context_reg_seq(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, 3);
radeon_emit(cmd_buffer->cs, ds->db_htile_data_base);
- radeon_emit(cmd_buffer->cs, ds->db_htile_data_base >> 32);
+ radeon_emit(cmd_buffer->cs, S_028018_BASE_HI(ds->db_htile_data_base >> 32));
radeon_emit(cmd_buffer->cs, ds->db_depth_size);
radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 10);
radeon_emit(cmd_buffer->cs, db_z_info); /* DB_Z_INFO */
radeon_emit(cmd_buffer->cs, db_stencil_info); /* DB_STENCIL_INFO */
radeon_emit(cmd_buffer->cs, ds->db_z_read_base); /* DB_Z_READ_BASE */
- radeon_emit(cmd_buffer->cs, ds->db_z_read_base >> 32); /* DB_Z_READ_BASE_HI */
+ radeon_emit(cmd_buffer->cs, S_028044_BASE_HI(ds->db_z_read_base >> 32)); /* DB_Z_READ_BASE_HI */
radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base); /* DB_STENCIL_READ_BASE */
- radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base >> 32); /* DB_STENCIL_READ_BASE_HI */
+ radeon_emit(cmd_buffer->cs, S_02804C_BASE_HI(ds->db_stencil_read_base >> 32)); /* DB_STENCIL_READ_BASE_HI */
radeon_emit(cmd_buffer->cs, ds->db_z_write_base); /* DB_Z_WRITE_BASE */
- radeon_emit(cmd_buffer->cs, ds->db_z_write_base >> 32); /* DB_Z_WRITE_BASE_HI */
+ radeon_emit(cmd_buffer->cs, S_028054_BASE_HI(ds->db_z_write_base >> 32)); /* DB_Z_WRITE_BASE_HI */
radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base); /* DB_STENCIL_WRITE_BASE */
- radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base >> 32); /* DB_STENCIL_WRITE_BASE_HI */
+ radeon_emit(cmd_buffer->cs, S_02805C_BASE_HI(ds->db_stencil_write_base >> 32)); /* DB_STENCIL_WRITE_BASE_HI */
radeon_set_context_reg_seq(cmd_buffer->cs, R_028068_DB_Z_INFO2, 2);
radeon_emit(cmd_buffer->cs, ds->db_z_info2);
va += image->offset + image->clear_value_offset;
unsigned reg_offset = 0, reg_count = 0;
- assert(image->surface.htile_size);
+ assert(radv_image_has_htile(image));
if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
++reg_count;
va += image->offset + image->clear_value_offset;
unsigned reg_offset = 0, reg_count = 0;
- if (!image->surface.htile_size)
+ if (!radv_image_has_htile(image))
return;
if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
uint64_t va = radv_buffer_get_va(image->bo);
va += image->offset + image->dcc_pred_offset;
- assert(image->surface.dcc_size);
+ assert(radv_image_has_dcc(image));
radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
uint64_t va = radv_buffer_get_va(image->bo);
va += image->offset + image->clear_value_offset;
- assert(image->cmask.size || image->surface.dcc_size);
+ assert(radv_image_has_cmask(image) || radv_image_has_dcc(image));
radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
uint64_t va = radv_buffer_get_va(image->bo);
va += image->offset + image->clear_value_offset;
- if (!image->cmask.size && !image->surface.dcc_size)
+ if (!radv_image_has_cmask(image) && !radv_image_has_dcc(image))
return;
uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + idx * 0x3c;
void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer)
{
+ struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ uint32_t pa_sc_mode_cntl_1 =
+ pipeline ? pipeline->graphics.ms.pa_sc_mode_cntl_1 : 0;
uint32_t db_count_control;
if(!cmd_buffer->state.active_occlusion_queries) {
if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
+ if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) &&
+ pipeline->graphics.disable_out_of_order_rast_for_occlusion) {
+ /* Re-enable out-of-order rasterization if the
+ * bound pipeline supports it and if it has
+ * been disabled before starting occlusion
+ * queries.
+ */
+ radeon_set_context_reg(cmd_buffer->cs,
+ R_028A4C_PA_SC_MODE_CNTL_1,
+ pa_sc_mode_cntl_1);
+ }
db_count_control = 0;
} else {
db_count_control = S_028004_ZPASS_INCREMENT_DISABLE(1);
}
} else {
+ const struct radv_subpass *subpass = cmd_buffer->state.subpass;
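+ /* SAMPLE_RATE expects the log2 of the subpass sample count. */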
+ uint32_t sample_rate = subpass ? util_logbase2(subpass->max_sample_count) : 0;
+ bool perfect = cmd_buffer->state.perfect_occlusion_queries_enabled;
+
if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
- db_count_control = S_028004_PERFECT_ZPASS_COUNTS(1) |
- S_028004_SAMPLE_RATE(0) | /* TODO: set this to the number of samples of the current framebuffer */
+ db_count_control =
+ S_028004_PERFECT_ZPASS_COUNTS(perfect) |
+ S_028004_SAMPLE_RATE(sample_rate) |
S_028004_ZPASS_ENABLE(1) |
S_028004_SLICE_EVEN_ENABLE(1) |
S_028004_SLICE_ODD_ENABLE(1);
+
+ if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) &&
+ pipeline->graphics.disable_out_of_order_rast_for_occlusion) {
+ /* If the bound pipeline has enabled
+ * out-of-order rasterization, we should
+ * disable it before starting occlusion
+ * queries.
+ */
+ pa_sc_mode_cntl_1 &= C_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE;
+
+ radeon_set_context_reg(cmd_buffer->cs,
+ R_028A4C_PA_SC_MODE_CNTL_1,
+ pa_sc_mode_cntl_1);
+ }
} else {
db_count_control = S_028004_PERFECT_ZPASS_COUNTS(1) |
- S_028004_SAMPLE_RATE(0); /* TODO: set this to the number of samples of the current framebuffer */
+ S_028004_SAMPLE_RATE(sample_rate);
}
}
uint64_t va,
gl_shader_stage stage)
{
- struct ac_userdata_info *desc_set_loc = &pipeline->shaders[stage]->info.user_sgprs_locs.descriptor_sets[idx];
+ struct radv_userdata_info *desc_set_loc = &pipeline->shaders[stage]->info.user_sgprs_locs.descriptor_sets[idx];
uint32_t base_reg = pipeline->user_data_0[stage];
if (desc_set_loc->sgpr_idx == -1 || desc_set_loc->indirect)
}
static void
-radv_flush_push_descriptors(struct radv_cmd_buffer *cmd_buffer)
+radv_flush_push_descriptors(struct radv_cmd_buffer *cmd_buffer,
+ VkPipelineBindPoint bind_point)
{
- struct radv_descriptor_set *set = &cmd_buffer->push_descriptors.set;
+ struct radv_descriptor_state *descriptors_state =
+ radv_get_descriptors_state(cmd_buffer, bind_point);
+ struct radv_descriptor_set *set = &descriptors_state->push_set.set;
unsigned bo_offset;
if (!radv_cmd_buffer_upload_data(cmd_buffer, set->size, 32,
}
static void
-radv_flush_indirect_descriptor_sets(struct radv_cmd_buffer *cmd_buffer)
+radv_flush_indirect_descriptor_sets(struct radv_cmd_buffer *cmd_buffer,
+ VkPipelineBindPoint bind_point)
{
+ struct radv_descriptor_state *descriptors_state =
+ radv_get_descriptors_state(cmd_buffer, bind_point);
uint32_t size = MAX_SETS * 2 * 4;
uint32_t offset;
void *ptr;
for (unsigned i = 0; i < MAX_SETS; i++) {
uint32_t *uptr = ((uint32_t *)ptr) + i * 2;
uint64_t set_va = 0;
- struct radv_descriptor_set *set = cmd_buffer->descriptors[i];
- if (cmd_buffer->state.valid_descriptors & (1u << i))
+ struct radv_descriptor_set *set = descriptors_state->sets[i];
+ if (descriptors_state->valid & (1u << i))
set_va = set->va;
uptr[0] = set_va & 0xffffffff;
uptr[1] = set_va >> 32;
radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer,
VkShaderStageFlags stages)
{
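+ /* Descriptors are tracked separately per bind point (graphics vs. compute). */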
+ VkPipelineBindPoint bind_point = stages & VK_SHADER_STAGE_COMPUTE_BIT ?
+ VK_PIPELINE_BIND_POINT_COMPUTE :
+ VK_PIPELINE_BIND_POINT_GRAPHICS;
+ struct radv_descriptor_state *descriptors_state =
+ radv_get_descriptors_state(cmd_buffer, bind_point);
unsigned i;
- if (!cmd_buffer->state.descriptors_dirty)
+ if (!descriptors_state->dirty)
return;
- if (cmd_buffer->state.push_descriptors_dirty)
- radv_flush_push_descriptors(cmd_buffer);
+ if (descriptors_state->push_dirty)
+ radv_flush_push_descriptors(cmd_buffer, bind_point);
if ((cmd_buffer->state.pipeline && cmd_buffer->state.pipeline->need_indirect_descriptor_sets) ||
(cmd_buffer->state.compute_pipeline && cmd_buffer->state.compute_pipeline->need_indirect_descriptor_sets)) {
- radv_flush_indirect_descriptor_sets(cmd_buffer);
+ radv_flush_indirect_descriptor_sets(cmd_buffer, bind_point);
}
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
cmd_buffer->cs,
MAX_SETS * MESA_SHADER_STAGES * 4);
- for_each_bit(i, cmd_buffer->state.descriptors_dirty) {
- struct radv_descriptor_set *set = cmd_buffer->descriptors[i];
- if (!(cmd_buffer->state.valid_descriptors & (1u << i)))
+ for_each_bit(i, descriptors_state->dirty) {
+ struct radv_descriptor_set *set = descriptors_state->sets[i];
+ if (!(descriptors_state->valid & (1u << i)))
continue;
radv_emit_descriptor_set_userdata(cmd_buffer, stages, set, i);
}
- cmd_buffer->state.descriptors_dirty = 0;
- cmd_buffer->state.push_descriptors_dirty = false;
+ descriptors_state->dirty = 0;
+ descriptors_state->push_dirty = false;
if (unlikely(cmd_buffer->device->trace_bo))
- radv_save_descriptors(cmd_buffer);
+ radv_save_descriptors(cmd_buffer, bind_point);
assert(cmd_buffer->cs->cdw <= cdw_max);
}
static void
radv_flush_constants(struct radv_cmd_buffer *cmd_buffer,
- struct radv_pipeline *pipeline,
VkShaderStageFlags stages)
{
+ struct radv_pipeline *pipeline = stages & VK_SHADER_STAGE_COMPUTE_BIT
+ ? cmd_buffer->state.compute_pipeline
+ : cmd_buffer->state.pipeline;
struct radv_pipeline_layout *layout = pipeline->layout;
unsigned offset;
void *ptr;
assert(cmd_buffer->cs->cdw <= cdw_max);
}
-static bool
-radv_cmd_buffer_update_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer, bool pipeline_is_dirty)
+static void
+radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer,
+ bool pipeline_is_dirty)
{
if ((pipeline_is_dirty ||
(cmd_buffer->state.dirty & RADV_CMD_DIRTY_VERTEX_BUFFER)) &&
/* allocate some descriptor state for vertex buffers */
if (!radv_cmd_buffer_upload_alloc(cmd_buffer, count * 16, 256,
&vb_offset, &vb_ptr))
- return false;
+ return;
for (i = 0; i < count; i++) {
uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4];
cmd_buffer->state.vb_va = va;
cmd_buffer->state.vb_size = count * 16;
- cmd_buffer->state.vb_prefetch_dirty = true;
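+ /* The new VBO descriptors need to be prefetched into L2 before the next draw. */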
+ cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_VBO_DESCRIPTORS;
}
cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_VERTEX_BUFFER;
-
- return true;
}
-static bool
+static void
radv_upload_graphics_shader_descriptors(struct radv_cmd_buffer *cmd_buffer, bool pipeline_is_dirty)
{
- if (!radv_cmd_buffer_update_vertex_descriptors(cmd_buffer, pipeline_is_dirty))
- return false;
-
+ radv_flush_vertex_descriptors(cmd_buffer, pipeline_is_dirty);
radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
- radv_flush_constants(cmd_buffer, cmd_buffer->state.pipeline,
- VK_SHADER_STAGE_ALL_GRAPHICS);
-
- return true;
+ radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
}
static void
static void
radv_bind_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
+ VkPipelineBindPoint bind_point,
struct radv_descriptor_set *set, unsigned idx)
{
struct radeon_winsys *ws = cmd_buffer->device->ws;
- radv_set_descriptor_set(cmd_buffer, set, idx);
+ radv_set_descriptor_set(cmd_buffer, bind_point, set, idx);
if (!set)
return;
for (unsigned i = 0; i < descriptorSetCount; ++i) {
unsigned idx = i + firstSet;
RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);
- radv_bind_descriptor_set(cmd_buffer, set, idx);
+ radv_bind_descriptor_set(cmd_buffer, pipelineBindPoint, set, idx);
for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start;
static bool radv_init_push_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
struct radv_descriptor_set *set,
- struct radv_descriptor_set_layout *layout)
+ struct radv_descriptor_set_layout *layout,
+ VkPipelineBindPoint bind_point)
{
+ struct radv_descriptor_state *descriptors_state =
+ radv_get_descriptors_state(cmd_buffer, bind_point);
set->size = layout->size;
set->layout = layout;
- if (cmd_buffer->push_descriptors.capacity < set->size) {
+ if (descriptors_state->push_set.capacity < set->size) {
size_t new_size = MAX2(set->size, 1024);
- new_size = MAX2(new_size, 2 * cmd_buffer->push_descriptors.capacity);
+ new_size = MAX2(new_size, 2 * descriptors_state->push_set.capacity);
new_size = MIN2(new_size, 96 * MAX_PUSH_DESCRIPTORS);
free(set->mapped_ptr);
set->mapped_ptr = malloc(new_size);
if (!set->mapped_ptr) {
- cmd_buffer->push_descriptors.capacity = 0;
+ descriptors_state->push_set.capacity = 0;
cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
return false;
}
- cmd_buffer->push_descriptors.capacity = new_size;
+ descriptors_state->push_set.capacity = new_size;
}
return true;
radv_descriptor_set_to_handle(push_set),
descriptorWriteCount, pDescriptorWrites, 0, NULL);
- radv_set_descriptor_set(cmd_buffer, push_set, set);
+ radv_set_descriptor_set(cmd_buffer, pipelineBindPoint, push_set, set);
}
void radv_CmdPushDescriptorSetKHR(
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
- struct radv_descriptor_set *push_set = &cmd_buffer->push_descriptors.set;
+ struct radv_descriptor_state *descriptors_state =
+ radv_get_descriptors_state(cmd_buffer, pipelineBindPoint);
+ struct radv_descriptor_set *push_set = &descriptors_state->push_set.set;
assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
- if (!radv_init_push_descriptor_set(cmd_buffer, push_set, layout->set[set].layout))
+ if (!radv_init_push_descriptor_set(cmd_buffer, push_set,
+ layout->set[set].layout,
+ pipelineBindPoint))
return;
radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
radv_descriptor_set_to_handle(push_set),
descriptorWriteCount, pDescriptorWrites, 0, NULL);
- radv_set_descriptor_set(cmd_buffer, push_set, set);
- cmd_buffer->state.push_descriptors_dirty = true;
+ radv_set_descriptor_set(cmd_buffer, pipelineBindPoint, push_set, set);
+ descriptors_state->push_dirty = true;
}
void radv_CmdPushDescriptorSetWithTemplateKHR(
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
- struct radv_descriptor_set *push_set = &cmd_buffer->push_descriptors.set;
+ RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);
+ struct radv_descriptor_state *descriptors_state =
+ radv_get_descriptors_state(cmd_buffer, templ->bind_point);
+ struct radv_descriptor_set *push_set = &descriptors_state->push_set.set;
assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
- if (!radv_init_push_descriptor_set(cmd_buffer, push_set, layout->set[set].layout))
+ if (!radv_init_push_descriptor_set(cmd_buffer, push_set,
+ layout->set[set].layout,
+ templ->bind_point))
return;
radv_update_descriptor_set_with_template(cmd_buffer->device, cmd_buffer, push_set,
descriptorUpdateTemplate, pData);
- radv_set_descriptor_set(cmd_buffer, push_set, set);
- cmd_buffer->state.push_descriptors_dirty = true;
+ radv_set_descriptor_set(cmd_buffer, templ->bind_point, push_set, set);
+ descriptors_state->push_dirty = true;
}
void radv_CmdPushConstants(VkCommandBuffer commandBuffer,
static void
radv_emit_compute_pipeline(struct radv_cmd_buffer *cmd_buffer)
{
- struct radv_shader_variant *compute_shader;
struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
- struct radv_device *device = cmd_buffer->device;
- unsigned compute_resource_limits;
- unsigned waves_per_threadgroup;
- uint64_t va;
if (!pipeline || pipeline == cmd_buffer->state.emitted_compute_pipeline)
return;
cmd_buffer->state.emitted_compute_pipeline = pipeline;
- compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
- va = radv_buffer_get_va(compute_shader->bo) + compute_shader->bo_offset;
-
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
- cmd_buffer->cs, 19);
-
- radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B830_COMPUTE_PGM_LO, 2);
- radeon_emit(cmd_buffer->cs, va >> 8);
- radeon_emit(cmd_buffer->cs, va >> 40);
-
- radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
- radeon_emit(cmd_buffer->cs, compute_shader->rsrc1);
- radeon_emit(cmd_buffer->cs, compute_shader->rsrc2);
-
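+ /* Emit the register state pre-generated into pipeline->cs. */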
+ radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, pipeline->cs.cdw);
+ radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw);
cmd_buffer->compute_scratch_size_needed =
MAX2(cmd_buffer->compute_scratch_size_needed,
pipeline->max_waves * pipeline->scratch_bytes_per_wave);
- /* change these once we have scratch support */
- radeon_set_sh_reg(cmd_buffer->cs, R_00B860_COMPUTE_TMPRING_SIZE,
- S_00B860_WAVES(pipeline->max_waves) |
- S_00B860_WAVESIZE(pipeline->scratch_bytes_per_wave >> 10));
-
- /* Calculate best compute resource limits. */
- waves_per_threadgroup =
- DIV_ROUND_UP(compute_shader->info.cs.block_size[0] *
- compute_shader->info.cs.block_size[1] *
- compute_shader->info.cs.block_size[2], 64);
- compute_resource_limits =
- S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup % 4 == 0);
-
- if (device->physical_device->rad_info.chip_class >= CIK) {
- unsigned num_cu_per_se =
- device->physical_device->rad_info.num_good_compute_units /
- device->physical_device->rad_info.max_se;
-
- /* Force even distribution on all SIMDs in CU if the workgroup
- * size is 64. This has shown some good improvements if # of
- * CUs per SE is not a multiple of 4.
- */
- if (num_cu_per_se % 4 && waves_per_threadgroup == 1)
- compute_resource_limits |= S_00B854_FORCE_SIMD_DIST(1);
- }
-
- radeon_set_sh_reg(cmd_buffer->cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
- compute_resource_limits);
-
- radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
- radeon_emit(cmd_buffer->cs,
- S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[0]));
- radeon_emit(cmd_buffer->cs,
- S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[1]));
- radeon_emit(cmd_buffer->cs,
- S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[2]));
-
- assert(cmd_buffer->cs->cdw <= cdw_max);
+ radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
+ pipeline->shaders[MESA_SHADER_COMPUTE]->bo, 8);
if (unlikely(cmd_buffer->device->trace_bo))
radv_save_pipeline(cmd_buffer, pipeline, RING_COMPUTE);
}
-static void radv_mark_descriptor_sets_dirty(struct radv_cmd_buffer *cmd_buffer)
+static void radv_mark_descriptor_sets_dirty(struct radv_cmd_buffer *cmd_buffer,
+ VkPipelineBindPoint bind_point)
{
- cmd_buffer->state.descriptors_dirty |= cmd_buffer->state.valid_descriptors;
+ struct radv_descriptor_state *descriptors_state =
+ radv_get_descriptors_state(cmd_buffer, bind_point);
+
+ descriptors_state->dirty |= descriptors_state->valid;
}
void radv_CmdBindPipeline(
case VK_PIPELINE_BIND_POINT_COMPUTE:
if (cmd_buffer->state.compute_pipeline == pipeline)
return;
- radv_mark_descriptor_sets_dirty(cmd_buffer);
+ radv_mark_descriptor_sets_dirty(cmd_buffer, pipelineBindPoint);
cmd_buffer->state.compute_pipeline = pipeline;
cmd_buffer->push_constant_stages |= VK_SHADER_STAGE_COMPUTE_BIT;
case VK_PIPELINE_BIND_POINT_GRAPHICS:
if (cmd_buffer->state.pipeline == pipeline)
return;
- radv_mark_descriptor_sets_dirty(cmd_buffer);
+ radv_mark_descriptor_sets_dirty(cmd_buffer, pipelineBindPoint);
cmd_buffer->state.pipeline = pipeline;
if (!pipeline)
cmd_buffer->state.last_first_instance = -1;
cmd_buffer->state.last_vertex_offset = -1;
+ /* Prefetch all pipeline shaders at first draw time. */
+ cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_SHADERS;
+
radv_bind_dynamic_state(cmd_buffer, &pipeline->dynamic_state);
if (pipeline->graphics.esgs_ring_size > cmd_buffer->esgs_ring_size_needed)
cmd_buffer->tess_rings_needed = true;
if (radv_pipeline_has_gs(pipeline)) {
- struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_GEOMETRY,
+ struct radv_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_GEOMETRY,
AC_UD_SCRATCH_RING_OFFSETS);
if (cmd_buffer->ring_offsets_idx == -1)
cmd_buffer->ring_offsets_idx = loc->sgpr_idx;
secondary->state.last_ia_multi_vgt_param;
}
- if (secondary->state.last_first_instance != -1) {
- primary->state.last_first_instance =
- secondary->state.last_first_instance;
- }
-
- if (secondary->state.last_num_instances != -1) {
- primary->state.last_num_instances =
- secondary->state.last_num_instances;
- }
-
- if (secondary->state.last_vertex_offset != -1) {
- primary->state.last_vertex_offset =
- secondary->state.last_vertex_offset;
- }
+ primary->state.last_first_instance = secondary->state.last_first_instance;
+ primary->state.last_num_instances = secondary->state.last_num_instances;
+ primary->state.last_vertex_offset = secondary->state.last_vertex_offset;
if (secondary->state.last_index_type != -1) {
primary->state.last_index_type =
primary->state.dirty |= RADV_CMD_DIRTY_PIPELINE |
RADV_CMD_DIRTY_INDEX_BUFFER |
RADV_CMD_DIRTY_DYNAMIC_ALL;
- radv_mark_descriptor_sets_dirty(primary);
+ radv_mark_descriptor_sets_dirty(primary, VK_PIPELINE_BIND_POINT_GRAPHICS);
+ radv_mark_descriptor_sets_dirty(primary, VK_PIPELINE_BIND_POINT_COMPUTE);
}
VkResult radv_CreateCommandPool(
return VK_SUCCESS;
}
-void radv_TrimCommandPoolKHR(
+void radv_TrimCommandPool(
VkDevice device,
VkCommandPool commandPool,
VkCommandPoolTrimFlagsKHR flags)
for (unsigned stage = 0; stage < MESA_SHADER_STAGES; ++stage) {
if (!pipeline->shaders[stage])
continue;
- struct ac_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, AC_UD_VIEW_INDEX);
+ struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, AC_UD_VIEW_INDEX);
if (loc->sgpr_idx == -1)
continue;
uint32_t base_reg = pipeline->user_data_0[stage];
}
if (pipeline->gs_copy_shader) {
- struct ac_userdata_info *loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_VIEW_INDEX];
+ struct radv_userdata_info *loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_VIEW_INDEX];
if (loc->sgpr_idx != -1) {
uint32_t base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0;
radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index);
}
if (state->last_num_instances != info->instance_count) {
- radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, state->predicating));
+ radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, false));
radeon_emit(cs, info->instance_count);
state->last_num_instances = info->instance_count;
}
radv_emit_all_graphics_states(struct radv_cmd_buffer *cmd_buffer,
const struct radv_draw_info *info)
{
+ if ((cmd_buffer->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER) ||
+ cmd_buffer->state.emitted_pipeline != cmd_buffer->state.pipeline)
+ radv_emit_rbplus_state(cmd_buffer);
+
if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE)
radv_emit_graphics_pipeline(cmd_buffer);
radv_draw(struct radv_cmd_buffer *cmd_buffer,
const struct radv_draw_info *info)
{
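+ /* Prefetching via CP DMA is only supported on CIK and later. */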
+ bool has_prefetch =
+ cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
bool pipeline_is_dirty =
(cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) &&
cmd_buffer->state.pipeline &&
si_emit_cache_flush(cmd_buffer);
/* <-- CUs are idle here --> */
- if (!radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty))
- return;
+ radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty);
radv_emit_draw_packets(cmd_buffer, info);
/* <-- CUs are busy here --> */
* run in parallel, but starting the draw first is more
* important.
*/
- if (pipeline_is_dirty) {
- radv_emit_prefetch(cmd_buffer,
- cmd_buffer->state.pipeline);
+ if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
+ radv_emit_prefetch_L2(cmd_buffer,
+ cmd_buffer->state.pipeline, false);
}
} else {
/* If we don't wait for idle, start prefetches first, then set
*/
si_emit_cache_flush(cmd_buffer);
- if (pipeline_is_dirty) {
- radv_emit_prefetch(cmd_buffer,
- cmd_buffer->state.pipeline);
+ if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
+ /* Only prefetch the vertex shader and VBO descriptors
+ * in order to start the draw as soon as possible.
+ */
+ radv_emit_prefetch_L2(cmd_buffer,
+ cmd_buffer->state.pipeline, true);
}
- if (!radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty))
- return;
+ radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty);
radv_emit_all_graphics_states(cmd_buffer, info);
radv_emit_draw_packets(cmd_buffer, info);
+
+ /* Prefetch the remaining shaders after the draw has been
+ * started.
+ */
+ if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
+ radv_emit_prefetch_L2(cmd_buffer,
+ cmd_buffer->state.pipeline, false);
+ }
}
assert(cmd_buffer->cs->cdw <= cdw_max);
*/
uint32_t blocks[3];
+ /**
+ * A starting offset for the grid. If unaligned is set, the offset
+ * must still be aligned.
+ */
+ uint32_t offsets[3];
/**
* Whether it's an unaligned compute dispatch.
*/
unsigned dispatch_initiator = cmd_buffer->device->dispatch_initiator;
struct radeon_winsys *ws = cmd_buffer->device->ws;
struct radeon_winsys_cs *cs = cmd_buffer->cs;
- struct ac_userdata_info *loc;
+ struct radv_userdata_info *loc;
loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_COMPUTE,
AC_UD_CS_GRID_SIZE);
}
} else {
unsigned blocks[3] = { info->blocks[0], info->blocks[1], info->blocks[2] };
+ unsigned offsets[3] = { info->offsets[0], info->offsets[1], info->offsets[2] };
if (info->unaligned) {
unsigned *cs_block_size = compute_shader->info.cs.block_size;
blocks[1] = round_up_u32(blocks[1], cs_block_size[1]);
blocks[2] = round_up_u32(blocks[2], cs_block_size[2]);
+ for(unsigned i = 0; i < 3; ++i) {
+ assert(offsets[i] % cs_block_size[i] == 0);
+ offsets[i] /= cs_block_size[i];
+ }
+
radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
radeon_emit(cs,
S_00B81C_NUM_THREAD_FULL(cs_block_size[0]) |
radeon_emit(cs, blocks[2]);
}
+ if (offsets[0] || offsets[1] || offsets[2]) {
+ radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
+ radeon_emit(cs, offsets[0]);
+ radeon_emit(cs, offsets[1]);
+ radeon_emit(cs, offsets[2]);
+
+ /* The blocks in the packet are not counts but end values. */
+ for (unsigned i = 0; i < 3; ++i)
+ blocks[i] += offsets[i];
+ } else {
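+ /* No grid offset, so force the dispatch to start at (0, 0, 0). */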
+ dispatch_initiator |= S_00B800_FORCE_START_AT_000(1);
+ }
+
radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, 0) |
PKT3_SHADER_TYPE_S(1));
radeon_emit(cs, blocks[0]);
radv_upload_compute_shader_descriptors(struct radv_cmd_buffer *cmd_buffer)
{
radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_COMPUTE_BIT);
- radv_flush_constants(cmd_buffer, cmd_buffer->state.compute_pipeline,
- VK_SHADER_STAGE_COMPUTE_BIT);
+ radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_COMPUTE_BIT);
}
static void
const struct radv_dispatch_info *info)
{
struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+ bool has_prefetch =
+ cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
bool pipeline_is_dirty = pipeline &&
pipeline != cmd_buffer->state.emitted_compute_pipeline;
* will run in parallel, but starting the dispatch first is
* more important.
*/
- if (pipeline_is_dirty) {
+ if (has_prefetch && pipeline_is_dirty) {
radv_emit_shader_prefetch(cmd_buffer,
pipeline->shaders[MESA_SHADER_COMPUTE]);
}
*/
si_emit_cache_flush(cmd_buffer);
- if (pipeline_is_dirty) {
+ if (has_prefetch && pipeline_is_dirty) {
radv_emit_shader_prefetch(cmd_buffer,
pipeline->shaders[MESA_SHADER_COMPUTE]);
}
radv_cmd_buffer_after_draw(cmd_buffer, RADV_CMD_FLAG_CS_PARTIAL_FLUSH);
}
-void radv_CmdDispatch(
+void radv_CmdDispatchBase(
VkCommandBuffer commandBuffer,
+ uint32_t base_x,
+ uint32_t base_y,
+ uint32_t base_z,
uint32_t x,
uint32_t y,
uint32_t z)
info.blocks[1] = y;
info.blocks[2] = z;
+ info.offsets[0] = base_x;
+ info.offsets[1] = base_y;
+ info.offsets[2] = base_z;
radv_dispatch(cmd_buffer, &info);
}
+void radv_CmdDispatch(
+ VkCommandBuffer commandBuffer,
+ uint32_t x,
+ uint32_t y,
+ uint32_t z)
+{
+ radv_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
+}
+
void radv_CmdDispatchIndirect(
VkCommandBuffer commandBuffer,
VkBuffer _buffer,
/*
* For HTILE we have the following interesting clear words:
- * 0x0000030f: Uncompressed for depth+stencil HTILE.
- * 0x0000000f: Uncompressed for depth only HTILE.
+ * 0xfffff30f: Uncompressed, full depth range, for depth+stencil HTILE.
+ * 0xfffc000f: Uncompressed, full depth range, for depth only HTILE.
* 0xfffffff0: Clear depth to 1.0
* 0x00000000: Clear depth to 0.0
*/
const VkImageSubresourceRange *range,
VkImageAspectFlags pending_clears)
{
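+ /* Layout transitions only matter for depth images with HTILE metadata. */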
+ if (!radv_image_has_htile(image))
+ return;
+
if (dst_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL &&
(pending_clears & vk_format_aspects(image->vk_format)) == vk_format_aspects(image->vk_format) &&
cmd_buffer->state.render_area.offset.x == 0 && cmd_buffer->state.render_area.offset.y == 0 &&
radv_initialize_htile(cmd_buffer, image, range, 0);
} else if (!radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) &&
radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) {
- uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0x30f : 0xf;
+ uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
radv_initialize_htile(cmd_buffer, image, range, clear_value);
} else if (radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) &&
!radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) {
}
}
-void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image, uint32_t value)
+static void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image, uint32_t value)
{
struct radv_cmd_state *state = &cmd_buffer->state;
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
- state->flush_bits |= radv_fill_buffer(cmd_buffer, image->bo,
- image->offset + image->cmask.offset,
- image->cmask.size, value);
+ state->flush_bits |= radv_clear_cmask(cmd_buffer, image, value);
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
}
-static void radv_handle_cmask_image_transition(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image,
- VkImageLayout src_layout,
- VkImageLayout dst_layout,
- unsigned src_queue_mask,
- unsigned dst_queue_mask,
- const VkImageSubresourceRange *range)
-{
- if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
- if (image->fmask.size)
- radv_initialise_cmask(cmd_buffer, image, 0xccccccccu);
- else
- radv_initialise_cmask(cmd_buffer, image, 0xffffffffu);
- } else if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
- !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
- radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
- }
-}
-
void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer,
struct radv_image *image, uint32_t value)
{
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
- state->flush_bits |= radv_fill_buffer(cmd_buffer, image->bo,
- image->offset + image->dcc_offset,
- image->surface.dcc_size, value);
+ state->flush_bits |= radv_clear_dcc(cmd_buffer, image, value);
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
}
-static void radv_handle_dcc_image_transition(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image,
- VkImageLayout src_layout,
- VkImageLayout dst_layout,
- unsigned src_queue_mask,
- unsigned dst_queue_mask,
- const VkImageSubresourceRange *range)
+/**
+ * Initialize DCC/FMASK/CMASK metadata for a color image.
+ */
+static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image,
+ VkImageLayout src_layout,
+ VkImageLayout dst_layout,
+ unsigned src_queue_mask,
+ unsigned dst_queue_mask)
{
- if (src_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
- radv_initialize_dcc(cmd_buffer, image, 0xffffffffu);
- } else if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
- radv_initialize_dcc(cmd_buffer, image,
- radv_layout_dcc_compressed(image, dst_layout, dst_queue_mask) ?
- 0x20202020u : 0xffffffffu);
- } else if (radv_layout_dcc_compressed(image, src_layout, src_queue_mask) &&
- !radv_layout_dcc_compressed(image, dst_layout, dst_queue_mask)) {
- radv_decompress_dcc(cmd_buffer, image, range);
- } else if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
- !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
- radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
+ if (radv_image_has_cmask(image)) {
+ uint32_t value = 0xffffffffu; /* Fully expanded mode. */
+
+ /* TODO: clarify this. */
+ if (radv_image_has_fmask(image)) {
+ value = 0xccccccccu;
+ }
+
+ radv_initialise_cmask(cmd_buffer, image, value);
+ }
+
+ if (radv_image_has_dcc(image)) {
+ uint32_t value = 0xffffffffu; /* Fully expanded mode. */
+
+ if (radv_layout_dcc_compressed(image, dst_layout,
+ dst_queue_mask)) {
+ value = 0x20202020u;
+ }
+
+ radv_initialize_dcc(cmd_buffer, image, value);
+ }
+}
+
+/**
+ * Handle color image transitions for DCC/FMASK/CMASK.
+ */
+static void radv_handle_color_image_transition(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image,
+ VkImageLayout src_layout,
+ VkImageLayout dst_layout,
+ unsigned src_queue_mask,
+ unsigned dst_queue_mask,
+ const VkImageSubresourceRange *range)
+{
+ if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
+ radv_init_color_image_metadata(cmd_buffer, image,
+ src_layout, dst_layout,
+ src_queue_mask, dst_queue_mask);
+ return;
+ }
+
+ if (radv_image_has_dcc(image)) {
+ if (src_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
+ radv_initialize_dcc(cmd_buffer, image, 0xffffffffu);
+ } else if (radv_layout_dcc_compressed(image, src_layout, src_queue_mask) &&
+ !radv_layout_dcc_compressed(image, dst_layout, dst_queue_mask)) {
+ radv_decompress_dcc(cmd_buffer, image, range);
+ } else if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
+ !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
+ radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
+ }
+ } else if (radv_image_has_cmask(image) || radv_image_has_fmask(image)) {
+ if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
+ !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
+ radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
+ }
}
}
return;
}
- unsigned src_queue_mask = radv_image_queue_family_mask(image, src_family, cmd_buffer->queue_family_index);
- unsigned dst_queue_mask = radv_image_queue_family_mask(image, dst_family, cmd_buffer->queue_family_index);
-
- if (image->surface.htile_size)
- radv_handle_depth_image_transition(cmd_buffer, image, src_layout,
- dst_layout, src_queue_mask,
- dst_queue_mask, range,
- pending_clears);
-
- if (image->cmask.size || image->fmask.size)
- radv_handle_cmask_image_transition(cmd_buffer, image, src_layout,
- dst_layout, src_queue_mask,
- dst_queue_mask, range);
+ unsigned src_queue_mask =
+ radv_image_queue_family_mask(image, src_family,
+ cmd_buffer->queue_family_index);
+ unsigned dst_queue_mask =
+ radv_image_queue_family_mask(image, dst_family,
+ cmd_buffer->queue_family_index);
- if (image->surface.dcc_size)
- radv_handle_dcc_image_transition(cmd_buffer, image, src_layout,
- dst_layout, src_queue_mask,
- dst_queue_mask, range);
+ if (vk_format_is_depth(image->vk_format)) {
+ radv_handle_depth_image_transition(cmd_buffer, image,
+ src_layout, dst_layout,
+ src_queue_mask, dst_queue_mask,
+ range, pending_clears);
+ } else {
+ radv_handle_color_image_transition(cmd_buffer, image,
+ src_layout, dst_layout,
+ src_queue_mask, dst_queue_mask,
+ range);
+ }
}
void radv_CmdPipelineBarrier(
RADV_CMD_FLAG_INV_VMEM_L1 |
RADV_CMD_FLAG_INV_SMEM_L1;
}
+
+
+void radv_CmdSetDeviceMask(VkCommandBuffer commandBuffer,
+ uint32_t deviceMask)
+{
+ /* No-op */
+}