#include "ac_debug.h"
+enum {
+ RADV_PREFETCH_VBO_DESCRIPTORS = (1 << 0),
+ RADV_PREFETCH_VS = (1 << 1),
+ RADV_PREFETCH_TCS = (1 << 2),
+ RADV_PREFETCH_TES = (1 << 3),
+ RADV_PREFETCH_GS = (1 << 4),
+ RADV_PREFETCH_PS = (1 << 5),
+ RADV_PREFETCH_SHADERS = (RADV_PREFETCH_VS |
+ RADV_PREFETCH_TCS |
+ RADV_PREFETCH_TES |
+ RADV_PREFETCH_GS |
+ RADV_PREFETCH_PS)
+};
+
static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
struct radv_image *image,
VkImageLayout src_layout,
cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (cmd_buffer == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
cmd_buffer->device = device;
cmd_buffer->queue_family_index = pool->queue_family_index;
} else {
- /* Init the pool_link so we can safefly call list_del when we destroy
+ /* Init the pool_link so we can safely call list_del when we destroy
* the command buffer
*/
list_inithead(&cmd_buffer->pool_link);
cmd_buffer->cs = device->ws->cs_create(device->ws, ring);
if (!cmd_buffer->cs) {
vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
*pCommandBuffer = radv_cmd_buffer_to_handle(cmd_buffer);
if (cmd_buffer->upload.upload_bo)
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
- cmd_buffer->upload.upload_bo, 8);
+ cmd_buffer->upload.upload_bo);
cmd_buffer->upload.offset = 0;
cmd_buffer->record_result = VK_SUCCESS;
- cmd_buffer->ring_offsets_idx = -1;
-
for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
cmd_buffer->descriptors[i].dirty = 0;
cmd_buffer->descriptors[i].valid = 0;
}
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
+ unsigned num_db = cmd_buffer->device->physical_device->rad_info.num_render_backends;
+ unsigned eop_bug_offset;
void *fence_ptr;
+
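+ /* Allocate an 8-byte fence used by GFX9 cache flushes. */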
radv_cmd_buffer_upload_alloc(cmd_buffer, 8, 0,
&cmd_buffer->gfx9_fence_offset,
&fence_ptr);
cmd_buffer->gfx9_fence_bo = cmd_buffer->upload.upload_bo;
+
+ /* Allocate a buffer for the EOP bug on GFX9. */
+ radv_cmd_buffer_upload_alloc(cmd_buffer, 16 * num_db, 0,
+ &eop_bug_offset, &fence_ptr);
+ cmd_buffer->gfx9_eop_bug_va =
+ radv_buffer_get_va(cmd_buffer->upload.upload_bo);
+ cmd_buffer->gfx9_eop_bug_va += eop_bug_offset;
}
cmd_buffer->status = RADV_CMD_BUFFER_STATUS_INITIAL;
new_size, 4096,
RADEON_DOMAIN_GTT,
RADEON_FLAG_CPU_ACCESS|
- RADEON_FLAG_NO_INTERPROCESS_SHARING);
+ RADEON_FLAG_NO_INTERPROCESS_SHARING |
+ RADEON_FLAG_32BIT);
if (!bo) {
cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
return false;
}
- radv_cs_add_buffer(device->ws, cmd_buffer->cs, bo, 8);
+ radv_cs_add_buffer(device->ws, cmd_buffer->cs, bo);
if (cmd_buffer->upload.upload_bo) {
upload = malloc(sizeof(*upload));
}
static void
-radv_emit_write_data_packet(struct radeon_winsys_cs *cs, uint64_t va,
+radv_emit_write_data_packet(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
unsigned count, const uint32_t *data)
{
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
+
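+ /* 4 dwords for the WRITE_DATA header/control words plus the payload. */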
+ radeon_check_space(cmd_buffer->device->ws, cs, 4 + count);
+
radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0));
radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
S_370_WR_CONFIRM(1) |
void radv_cmd_buffer_trace_emit(struct radv_cmd_buffer *cmd_buffer)
{
struct radv_device *device = cmd_buffer->device;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint64_t va;
va = radv_buffer_get_va(device->trace_bo);
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
va += 4;
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 7);
-
++cmd_buffer->state.trace_id;
- radv_cs_add_buffer(device->ws, cs, device->trace_bo, 8);
- radv_emit_write_data_packet(cs, va, 1, &cmd_buffer->state.trace_id);
+ radv_emit_write_data_packet(cmd_buffer, va, 1,
+ &cmd_buffer->state.trace_id);
+
+ radeon_check_space(cmd_buffer->device->ws, cs, 2);
+
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, AC_ENCODE_TRACE_POINT(cmd_buffer->state.trace_id));
}
ptr = &cmd_buffer->gfx9_fence_idx;
}
+ radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 4);
+
/* Force wait for graphics or compute engines to be idle. */
si_cs_emit_cache_flush(cmd_buffer->cs,
cmd_buffer->device->physical_device->rad_info.chip_class,
ptr, va,
radv_cmd_buffer_uses_mec(cmd_buffer),
- flags);
+ flags, cmd_buffer->gfx9_eop_bug_va);
}
if (unlikely(cmd_buffer->device->trace_bo))
struct radv_pipeline *pipeline, enum ring_type ring)
{
struct radv_device *device = cmd_buffer->device;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
uint32_t data[2];
uint64_t va;
assert(!"invalid ring type");
}
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(device->ws,
- cmd_buffer->cs, 6);
-
data[0] = (uintptr_t)pipeline;
data[1] = (uintptr_t)pipeline >> 32;
- radv_cs_add_buffer(device->ws, cs, device->trace_bo, 8);
- radv_emit_write_data_packet(cs, va, 2, data);
+ radv_emit_write_data_packet(cmd_buffer, va, 2, data);
}
void radv_set_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
radv_get_descriptors_state(cmd_buffer, bind_point);
descriptors_state->sets[idx] = set;
- if (set)
- descriptors_state->valid |= (1u << idx);
- else
- descriptors_state->valid &= ~(1u << idx);
+
+ descriptors_state->valid |= (1u << idx); /* active descriptors */
descriptors_state->dirty |= (1u << idx);
}
struct radv_descriptor_state *descriptors_state =
radv_get_descriptors_state(cmd_buffer, bind_point);
struct radv_device *device = cmd_buffer->device;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
uint32_t data[MAX_SETS * 2] = {};
uint64_t va;
unsigned i;
va = radv_buffer_get_va(device->trace_bo) + 24;
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(device->ws,
- cmd_buffer->cs, 4 + MAX_SETS * 2);
-
for_each_bit(i, descriptors_state->valid) {
struct radv_descriptor_set *set = descriptors_state->sets[i];
data[i * 2] = (uintptr_t)set;
data[i * 2 + 1] = (uintptr_t)set >> 32;
}
- radv_cs_add_buffer(device->ws, cs, device->trace_bo, 8);
- radv_emit_write_data_packet(cs, va, MAX_SETS * 2, data);
+ radv_emit_write_data_packet(cmd_buffer, va, MAX_SETS * 2, data);
}
-struct ac_userdata_info *
+struct radv_userdata_info *
radv_lookup_user_sgpr(struct radv_pipeline *pipeline,
gl_shader_stage stage,
int idx)
{
- if (stage == MESA_SHADER_VERTEX) {
- if (pipeline->shaders[MESA_SHADER_VERTEX])
- return &pipeline->shaders[MESA_SHADER_VERTEX]->info.user_sgprs_locs.shader_data[idx];
- if (pipeline->shaders[MESA_SHADER_TESS_CTRL])
- return &pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.user_sgprs_locs.shader_data[idx];
- if (pipeline->shaders[MESA_SHADER_GEOMETRY])
- return &pipeline->shaders[MESA_SHADER_GEOMETRY]->info.user_sgprs_locs.shader_data[idx];
- } else if (stage == MESA_SHADER_TESS_EVAL) {
- if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
- return &pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.user_sgprs_locs.shader_data[idx];
- if (pipeline->shaders[MESA_SHADER_GEOMETRY])
- return &pipeline->shaders[MESA_SHADER_GEOMETRY]->info.user_sgprs_locs.shader_data[idx];
- }
- return &pipeline->shaders[stage]->info.user_sgprs_locs.shader_data[idx];
+ struct radv_shader_variant *shader = radv_get_shader(pipeline, stage);
+ return &shader->info.user_sgprs_locs.shader_data[idx];
}
static void
gl_shader_stage stage,
int idx, uint64_t va)
{
- struct ac_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx);
+ struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx);
uint32_t base_reg = pipeline->user_data_0[stage];
if (loc->sgpr_idx == -1)
return;
- assert(loc->num_sgprs == 2);
+
+ assert(loc->num_sgprs == (HAVE_32BIT_POINTERS ? 1 : 2));
assert(!loc->indirect);
- radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, 2);
- radeon_emit(cmd_buffer->cs, va);
- radeon_emit(cmd_buffer->cs, va >> 32);
+
+ radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs,
+ base_reg + loc->sgpr_idx * 4, va, false);
+}
+
+static void
+radv_emit_descriptor_pointers(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_pipeline *pipeline,
+ struct radv_descriptor_state *descriptors_state,
+ gl_shader_stage stage)
+{
+ struct radv_device *device = cmd_buffer->device;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
+ uint32_t sh_base = pipeline->user_data_0[stage];
+ struct radv_userdata_locations *locs =
+ &pipeline->shaders[stage]->info.user_sgprs_locs;
+ unsigned mask = locs->descriptor_sets_enabled;
+
+ mask &= descriptors_state->dirty & descriptors_state->valid;
+
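+ /* Emit each run of consecutive dirty sets with a single shader pointer packet. */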
+ while (mask) {
+ int start, count;
+
+ u_bit_scan_consecutive_range(&mask, &start, &count);
+
+ struct radv_userdata_info *loc = &locs->descriptor_sets[start];
+ unsigned sh_offset = sh_base + loc->sgpr_idx * 4;
+
+ radv_emit_shader_pointer_head(cs, sh_offset, count,
+ HAVE_32BIT_POINTERS);
+ for (int i = 0; i < count; i++) {
+ struct radv_descriptor_set *set =
+ descriptors_state->sets[start + i];
+
+ radv_emit_shader_pointer_body(device, cs, set->va,
+ HAVE_32BIT_POINTERS);
+ }
+ }
}
static void
}
}
+static void
+radv_emit_shader_prefetch(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_shader_variant *shader)
+{
+ uint64_t va;
+ if (!shader)
+ return;
-static inline void
-radv_emit_prefetch_TC_L2_async(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
- unsigned size)
-{
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK)
- si_cp_dma_prefetch(cmd_buffer, va, size);
+ va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
+
+ si_cp_dma_prefetch(cmd_buffer, va, shader->code_size);
}
static void
-radv_emit_VBO_descriptors_prefetch(struct radv_cmd_buffer *cmd_buffer)
+radv_emit_prefetch_L2(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_pipeline *pipeline,
+ bool vertex_stage_only)
{
- if (cmd_buffer->state.vb_prefetch_dirty) {
- radv_emit_prefetch_TC_L2_async(cmd_buffer,
- cmd_buffer->state.vb_va,
- cmd_buffer->state.vb_size);
- cmd_buffer->state.vb_prefetch_dirty = false;
+ struct radv_cmd_state *state = &cmd_buffer->state;
+ uint32_t mask = state->prefetch_L2_mask;
+
+ if (vertex_stage_only) {
+ /* Fast prefetch path for starting draws as soon as possible.
+ */
+ mask = state->prefetch_L2_mask & (RADV_PREFETCH_VS |
+ RADV_PREFETCH_VBO_DESCRIPTORS);
+ }
+
+ if (mask & RADV_PREFETCH_VS)
+ radv_emit_shader_prefetch(cmd_buffer,
+ pipeline->shaders[MESA_SHADER_VERTEX]);
+
+ if (mask & RADV_PREFETCH_VBO_DESCRIPTORS)
+ si_cp_dma_prefetch(cmd_buffer, state->vb_va, state->vb_size);
+
+ if (mask & RADV_PREFETCH_TCS)
+ radv_emit_shader_prefetch(cmd_buffer,
+ pipeline->shaders[MESA_SHADER_TESS_CTRL]);
+
+ if (mask & RADV_PREFETCH_TES)
+ radv_emit_shader_prefetch(cmd_buffer,
+ pipeline->shaders[MESA_SHADER_TESS_EVAL]);
+
+ if (mask & RADV_PREFETCH_GS) {
+ radv_emit_shader_prefetch(cmd_buffer,
+ pipeline->shaders[MESA_SHADER_GEOMETRY]);
+ radv_emit_shader_prefetch(cmd_buffer, pipeline->gs_copy_shader);
}
+
+ if (mask & RADV_PREFETCH_PS)
+ radv_emit_shader_prefetch(cmd_buffer,
+ pipeline->shaders[MESA_SHADER_FRAGMENT]);
+
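+ /* Clear the bits for everything that has now been prefetched. */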
+ state->prefetch_L2_mask &= ~mask;
}
static void
-radv_emit_shader_prefetch(struct radv_cmd_buffer *cmd_buffer,
- struct radv_shader_variant *shader)
+radv_emit_rbplus_state(struct radv_cmd_buffer *cmd_buffer)
{
- struct radeon_winsys *ws = cmd_buffer->device->ws;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
- uint64_t va;
-
- if (!shader)
+ if (!cmd_buffer->device->physical_device->rbplus_allowed)
return;
- va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
+ struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
+ const struct radv_subpass *subpass = cmd_buffer->state.subpass;
- radv_cs_add_buffer(ws, cs, shader->bo, 8);
- radv_emit_prefetch_TC_L2_async(cmd_buffer, va, shader->code_size);
-}
+ unsigned sx_ps_downconvert = 0;
+ unsigned sx_blend_opt_epsilon = 0;
+ unsigned sx_blend_opt_control = 0;
-static void
-radv_emit_prefetch(struct radv_cmd_buffer *cmd_buffer,
- struct radv_pipeline *pipeline)
-{
- radv_emit_shader_prefetch(cmd_buffer,
- pipeline->shaders[MESA_SHADER_VERTEX]);
- radv_emit_VBO_descriptors_prefetch(cmd_buffer);
- radv_emit_shader_prefetch(cmd_buffer,
- pipeline->shaders[MESA_SHADER_TESS_CTRL]);
- radv_emit_shader_prefetch(cmd_buffer,
- pipeline->shaders[MESA_SHADER_TESS_EVAL]);
- radv_emit_shader_prefetch(cmd_buffer,
- pipeline->shaders[MESA_SHADER_GEOMETRY]);
- radv_emit_shader_prefetch(cmd_buffer, pipeline->gs_copy_shader);
- radv_emit_shader_prefetch(cmd_buffer,
- pipeline->shaders[MESA_SHADER_FRAGMENT]);
+ for (unsigned i = 0; i < subpass->color_count; ++i) {
+ if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
+ sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
+ sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);
+ continue;
+ }
+
+ int idx = subpass->color_attachments[i].attachment;
+ struct radv_color_buffer_info *cb = &framebuffer->attachments[idx].cb;
+
+ unsigned format = G_028C70_FORMAT(cb->cb_color_info);
+ unsigned swap = G_028C70_COMP_SWAP(cb->cb_color_info);
+ uint32_t spi_format = (pipeline->graphics.col_format >> (i * 4)) & 0xf;
+ uint32_t colormask = (pipeline->graphics.cb_target_mask >> (i * 4)) & 0xf;
+
+ bool has_alpha, has_rgb;
+
+ /* Set if RGB and A are present. */
+ has_alpha = !G_028C74_FORCE_DST_ALPHA_1(cb->cb_color_attrib);
+
+ if (format == V_028C70_COLOR_8 ||
+ format == V_028C70_COLOR_16 ||
+ format == V_028C70_COLOR_32)
+ has_rgb = !has_alpha;
+ else
+ has_rgb = true;
+
+ /* Check the colormask and export format. */
+ if (!(colormask & 0x7))
+ has_rgb = false;
+ if (!(colormask & 0x8))
+ has_alpha = false;
+
+ if (spi_format == V_028714_SPI_SHADER_ZERO) {
+ has_rgb = false;
+ has_alpha = false;
+ }
+
+ /* Disable value checking for disabled channels. */
+ if (!has_rgb)
+ sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
+ if (!has_alpha)
+ sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);
+
+ /* Enable down-conversion for 32bpp and smaller formats. */
+ switch (format) {
+ case V_028C70_COLOR_8:
+ case V_028C70_COLOR_8_8:
+ case V_028C70_COLOR_8_8_8_8:
+ /* For 1- and 2-channel formats, use the superset thereof. */
+ if (spi_format == V_028714_SPI_SHADER_FP16_ABGR ||
+ spi_format == V_028714_SPI_SHADER_UINT16_ABGR ||
+ spi_format == V_028714_SPI_SHADER_SINT16_ABGR) {
+ sx_ps_downconvert |= V_028754_SX_RT_EXPORT_8_8_8_8 << (i * 4);
+ sx_blend_opt_epsilon |= V_028758_8BIT_FORMAT << (i * 4);
+ }
+ break;
+
+ case V_028C70_COLOR_5_6_5:
+ if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
+ sx_ps_downconvert |= V_028754_SX_RT_EXPORT_5_6_5 << (i * 4);
+ sx_blend_opt_epsilon |= V_028758_6BIT_FORMAT << (i * 4);
+ }
+ break;
+
+ case V_028C70_COLOR_1_5_5_5:
+ if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
+ sx_ps_downconvert |= V_028754_SX_RT_EXPORT_1_5_5_5 << (i * 4);
+ sx_blend_opt_epsilon |= V_028758_5BIT_FORMAT << (i * 4);
+ }
+ break;
+
+ case V_028C70_COLOR_4_4_4_4:
+ if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
+ sx_ps_downconvert |= V_028754_SX_RT_EXPORT_4_4_4_4 << (i * 4);
+ sx_blend_opt_epsilon |= V_028758_4BIT_FORMAT << (i * 4);
+ }
+ break;
+
+ case V_028C70_COLOR_32:
+ if (swap == V_028C70_SWAP_STD &&
+ spi_format == V_028714_SPI_SHADER_32_R)
+ sx_ps_downconvert |= V_028754_SX_RT_EXPORT_32_R << (i * 4);
+ else if (swap == V_028C70_SWAP_ALT_REV &&
+ spi_format == V_028714_SPI_SHADER_32_AR)
+ sx_ps_downconvert |= V_028754_SX_RT_EXPORT_32_A << (i * 4);
+ break;
+
+ case V_028C70_COLOR_16:
+ case V_028C70_COLOR_16_16:
+ /* For 1-channel formats, use the superset thereof. */
+ if (spi_format == V_028714_SPI_SHADER_UNORM16_ABGR ||
+ spi_format == V_028714_SPI_SHADER_SNORM16_ABGR ||
+ spi_format == V_028714_SPI_SHADER_UINT16_ABGR ||
+ spi_format == V_028714_SPI_SHADER_SINT16_ABGR) {
+ if (swap == V_028C70_SWAP_STD ||
+ swap == V_028C70_SWAP_STD_REV)
+ sx_ps_downconvert |= V_028754_SX_RT_EXPORT_16_16_GR << (i * 4);
+ else
+ sx_ps_downconvert |= V_028754_SX_RT_EXPORT_16_16_AR << (i * 4);
+ }
+ break;
+
+ case V_028C70_COLOR_10_11_11:
+ if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
+ sx_ps_downconvert |= V_028754_SX_RT_EXPORT_10_11_11 << (i * 4);
+ sx_blend_opt_epsilon |= V_028758_11BIT_FORMAT << (i * 4);
+ }
+ break;
+
+ case V_028C70_COLOR_2_10_10_10:
+ if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
+ sx_ps_downconvert |= V_028754_SX_RT_EXPORT_2_10_10_10 << (i * 4);
+ sx_blend_opt_epsilon |= V_028758_10BIT_FORMAT << (i * 4);
+ }
+ break;
+ }
+ }
+
+ for (unsigned i = subpass->color_count; i < 8; ++i) {
+ sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
+ sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);
+ }
+ radeon_set_context_reg_seq(cmd_buffer->cs, R_028754_SX_PS_DOWNCONVERT, 3);
+ radeon_emit(cmd_buffer->cs, sx_ps_downconvert);
+ radeon_emit(cmd_buffer->cs, sx_blend_opt_epsilon);
+ radeon_emit(cmd_buffer->cs, sx_blend_opt_control);
}
static void
radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw);
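+ /* Add the shader code BOs to the CS so they stay resident while it executes. */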
+ for (unsigned i = 0; i < MESA_SHADER_COMPUTE; i++) {
+ if (!pipeline->shaders[i])
+ continue;
+
+ radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
+ pipeline->shaders[i]->bo);
+ }
+
+ if (radv_pipeline_has_gs(pipeline))
+ radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
+ pipeline->gs_copy_shader->bo);
+
if (unlikely(cmd_buffer->device->trace_bo))
radv_save_pipeline(cmd_buffer, pipeline, RING_GFX);
{
uint32_t count = cmd_buffer->state.dynamic.scissor.count;
- /* Vega10/Raven scissor bug workaround. This must be done before VPORT
- * scissor registers are changed. There is also a more efficient but
- * more involved alternative workaround.
- */
- if (cmd_buffer->device->physical_device->has_scissor_bug) {
- cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
- si_emit_cache_flush(cmd_buffer);
- }
si_write_scissors(cmd_buffer->cs, 0, count,
cmd_buffer->state.dynamic.scissor.scissors,
cmd_buffer->state.dynamic.viewport.viewports,
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
radeon_emit(cmd_buffer->cs, cb->cb_color_base);
- radeon_emit(cmd_buffer->cs, cb->cb_color_base >> 32);
+ radeon_emit(cmd_buffer->cs, S_028C64_BASE_256B(cb->cb_color_base >> 32));
radeon_emit(cmd_buffer->cs, cb->cb_color_attrib2);
radeon_emit(cmd_buffer->cs, cb->cb_color_view);
radeon_emit(cmd_buffer->cs, cb_color_info);
radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
- radeon_emit(cmd_buffer->cs, cb->cb_color_cmask >> 32);
+ radeon_emit(cmd_buffer->cs, S_028C80_BASE_256B(cb->cb_color_cmask >> 32));
radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
- radeon_emit(cmd_buffer->cs, cb->cb_color_fmask >> 32);
+ radeon_emit(cmd_buffer->cs, S_028C88_BASE_256B(cb->cb_color_fmask >> 32));
radeon_set_context_reg_seq(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, 2);
radeon_emit(cmd_buffer->cs, cb->cb_dcc_base);
- radeon_emit(cmd_buffer->cs, cb->cb_dcc_base >> 32);
+ radeon_emit(cmd_buffer->cs, S_028C98_BASE_256B(cb->cb_dcc_base >> 32));
radeon_set_context_reg(cmd_buffer->cs, R_0287A0_CB_MRT0_EPITCH + index * 4,
S_0287A0_EPITCH(att->attachment->image->surface.u.gfx9.surf.epitch));
}
}
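+/**
+ * Update the ZRANGE_PRECISION value in DB_Z_INFO for TC-compatible HTILE.
+ * When the last fast depth clear value is unknown, the register write is
+ * guarded by a COND_WRITE that only fires if the stored clear value is 0.0.
+ */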
+static void
+radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_ds_buffer_info *ds,
+ struct radv_image *image, VkImageLayout layout,
+ bool requires_cond_write)
+{
+ uint32_t db_z_info = ds->db_z_info;
+ uint32_t db_z_info_reg;
+
+ if (!radv_image_is_tc_compat_htile(image))
+ return;
+
+ if (!radv_layout_has_htile(image, layout,
+ radv_image_queue_family_mask(image,
+ cmd_buffer->queue_family_index,
+ cmd_buffer->queue_family_index))) {
+ db_z_info &= C_028040_TILE_SURFACE_ENABLE;
+ }
+
+ db_z_info &= C_028040_ZRANGE_PRECISION;
+
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
+ db_z_info_reg = R_028038_DB_Z_INFO;
+ } else {
+ db_z_info_reg = R_028040_DB_Z_INFO;
+ }
+
+ /* When we don't know the last fast clear value, we need to emit a
+ * conditional packet; otherwise we can update DB_Z_INFO directly.
+ */
+ if (requires_cond_write) {
+ radeon_emit(cmd_buffer->cs, PKT3(PKT3_COND_WRITE, 7, 0));
+
+ const uint32_t write_space = 0 << 8; /* register */
+ const uint32_t poll_space = 1 << 4; /* memory */
+ const uint32_t function = 3 << 0; /* equal to the reference */
+ const uint32_t options = write_space | poll_space | function;
+ radeon_emit(cmd_buffer->cs, options);
+
+ /* poll address - location of the depth clear value */
+ uint64_t va = radv_buffer_get_va(image->bo);
+ va += image->offset + image->clear_value_offset;
+
+ /* In the presence of a stencil format, we have to adjust the
+ * base address because the first value is the stencil clear
+ * value.
+ */
+ if (vk_format_is_stencil(image->vk_format))
+ va += 4;
+
+ radeon_emit(cmd_buffer->cs, va);
+ radeon_emit(cmd_buffer->cs, va >> 32);
+
+ radeon_emit(cmd_buffer->cs, fui(0.0f)); /* reference value */
+ radeon_emit(cmd_buffer->cs, (uint32_t)-1); /* comparison mask */
+ radeon_emit(cmd_buffer->cs, db_z_info_reg >> 2); /* write address low */
+ radeon_emit(cmd_buffer->cs, 0u); /* write address high */
+ radeon_emit(cmd_buffer->cs, db_z_info);
+ } else {
+ radeon_set_context_reg(cmd_buffer->cs, db_z_info_reg, db_z_info);
+ }
+}
+
static void
radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer,
struct radv_ds_buffer_info *ds,
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
radeon_set_context_reg_seq(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, 3);
radeon_emit(cmd_buffer->cs, ds->db_htile_data_base);
- radeon_emit(cmd_buffer->cs, ds->db_htile_data_base >> 32);
+ radeon_emit(cmd_buffer->cs, S_028018_BASE_HI(ds->db_htile_data_base >> 32));
radeon_emit(cmd_buffer->cs, ds->db_depth_size);
radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 10);
radeon_emit(cmd_buffer->cs, db_z_info); /* DB_Z_INFO */
radeon_emit(cmd_buffer->cs, db_stencil_info); /* DB_STENCIL_INFO */
radeon_emit(cmd_buffer->cs, ds->db_z_read_base); /* DB_Z_READ_BASE */
- radeon_emit(cmd_buffer->cs, ds->db_z_read_base >> 32); /* DB_Z_READ_BASE_HI */
+ radeon_emit(cmd_buffer->cs, S_028044_BASE_HI(ds->db_z_read_base >> 32)); /* DB_Z_READ_BASE_HI */
radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base); /* DB_STENCIL_READ_BASE */
- radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base >> 32); /* DB_STENCIL_READ_BASE_HI */
+ radeon_emit(cmd_buffer->cs, S_02804C_BASE_HI(ds->db_stencil_read_base >> 32)); /* DB_STENCIL_READ_BASE_HI */
radeon_emit(cmd_buffer->cs, ds->db_z_write_base); /* DB_Z_WRITE_BASE */
- radeon_emit(cmd_buffer->cs, ds->db_z_write_base >> 32); /* DB_Z_WRITE_BASE_HI */
+ radeon_emit(cmd_buffer->cs, S_028054_BASE_HI(ds->db_z_write_base >> 32)); /* DB_Z_WRITE_BASE_HI */
radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base); /* DB_STENCIL_WRITE_BASE */
- radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base >> 32); /* DB_STENCIL_WRITE_BASE_HI */
+ radeon_emit(cmd_buffer->cs, S_02805C_BASE_HI(ds->db_stencil_write_base >> 32)); /* DB_STENCIL_WRITE_BASE_HI */
radeon_set_context_reg_seq(cmd_buffer->cs, R_028068_DB_Z_INFO2, 2);
radeon_emit(cmd_buffer->cs, ds->db_z_info2);
}
+ /* Update the ZRANGE_PRECISION value for the TC-compat bug. */
+ radv_update_zrange_precision(cmd_buffer, ds, image, layout, true);
+
radeon_set_context_reg(cmd_buffer->cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
ds->pa_su_poly_offset_db_fmt_cntl);
}
-void
-radv_set_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image,
- VkClearDepthStencilValue ds_clear_value,
- VkImageAspectFlags aspects)
+/**
+ * Update the fast clear depth/stencil values if the image is bound as a
+ * depth/stencil buffer.
+ */
+static void
+radv_update_bound_fast_clear_ds(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image,
+ VkClearDepthStencilValue ds_clear_value,
+ VkImageAspectFlags aspects)
+{
+ struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
+ const struct radv_subpass *subpass = cmd_buffer->state.subpass;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
+ struct radv_attachment_info *att;
+ uint32_t att_idx;
+
+ if (!framebuffer || !subpass)
+ return;
+
+ att_idx = subpass->depth_stencil_attachment.attachment;
+ if (att_idx == VK_ATTACHMENT_UNUSED)
+ return;
+
+ att = &framebuffer->attachments[att_idx];
+ if (att->attachment->image != image)
+ return;
+
+ radeon_set_context_reg_seq(cs, R_028028_DB_STENCIL_CLEAR, 2);
+ radeon_emit(cs, ds_clear_value.stencil);
+ radeon_emit(cs, fui(ds_clear_value.depth));
+
+ /* Update the ZRANGE_PRECISION value for the TC-compat bug. This is
+ * only needed when clearing Z to 0.0.
+ */
+ if ((aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
+ ds_clear_value.depth == 0.0) {
+ VkImageLayout layout = subpass->depth_stencil_attachment.layout;
+
+ radv_update_zrange_precision(cmd_buffer, &att->ds, image,
+ layout, false);
+ }
+}
+
+/**
+ * Set the clear depth/stencil values to the image's metadata.
+ */
+static void
+radv_set_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image,
+ VkClearDepthStencilValue ds_clear_value,
+ VkImageAspectFlags aspects)
{
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint64_t va = radv_buffer_get_va(image->bo);
- va += image->offset + image->clear_value_offset;
unsigned reg_offset = 0, reg_count = 0;
- assert(image->surface.htile_size);
+ va += image->offset + image->clear_value_offset;
if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
++reg_count;
if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
++reg_count;
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0));
- radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
- S_370_WR_CONFIRM(1) |
- S_370_ENGINE_SEL(V_370_PFP));
- radeon_emit(cmd_buffer->cs, va);
- radeon_emit(cmd_buffer->cs, va >> 32);
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0));
+ radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
+ S_370_WR_CONFIRM(1) |
+ S_370_ENGINE_SEL(V_370_PFP));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
- radeon_emit(cmd_buffer->cs, ds_clear_value.stencil);
+ radeon_emit(cs, ds_clear_value.stencil);
if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
- radeon_emit(cmd_buffer->cs, fui(ds_clear_value.depth));
+ radeon_emit(cs, fui(ds_clear_value.depth));
+}
- radeon_set_context_reg_seq(cmd_buffer->cs, R_028028_DB_STENCIL_CLEAR + 4 * reg_offset, reg_count);
- if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
- radeon_emit(cmd_buffer->cs, ds_clear_value.stencil); /* R_028028_DB_STENCIL_CLEAR */
- if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
- radeon_emit(cmd_buffer->cs, fui(ds_clear_value.depth)); /* R_02802C_DB_DEPTH_CLEAR */
+/**
+ * Update the clear depth/stencil values for this image.
+ */
+void
+radv_update_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image,
+ VkClearDepthStencilValue ds_clear_value,
+ VkImageAspectFlags aspects)
+{
+ assert(radv_image_has_htile(image));
+
+ radv_set_ds_clear_metadata(cmd_buffer, image, ds_clear_value, aspects);
+
+ radv_update_bound_fast_clear_ds(cmd_buffer, image, ds_clear_value,
+ aspects);
}
+/**
+ * Load the clear depth/stencil values from the image's metadata.
+ */
static void
-radv_load_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image)
+radv_load_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image)
{
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
VkImageAspectFlags aspects = vk_format_aspects(image->vk_format);
uint64_t va = radv_buffer_get_va(image->bo);
- va += image->offset + image->clear_value_offset;
unsigned reg_offset = 0, reg_count = 0;
- if (!image->surface.htile_size)
+ va += image->offset + image->clear_value_offset;
+
+ if (!radv_image_has_htile(image))
return;
if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
++reg_count;
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, 0));
- radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
- COPY_DATA_DST_SEL(COPY_DATA_REG) |
- (reg_count == 2 ? COPY_DATA_COUNT_SEL : 0));
- radeon_emit(cmd_buffer->cs, va);
- radeon_emit(cmd_buffer->cs, va >> 32);
- radeon_emit(cmd_buffer->cs, (R_028028_DB_STENCIL_CLEAR + 4 * reg_offset) >> 2);
- radeon_emit(cmd_buffer->cs, 0);
+ radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
+ radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
+ COPY_DATA_DST_SEL(COPY_DATA_REG) |
+ (reg_count == 2 ? COPY_DATA_COUNT_SEL : 0));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, (R_028028_DB_STENCIL_CLEAR + 4 * reg_offset) >> 2);
+ radeon_emit(cs, 0);
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
- radeon_emit(cmd_buffer->cs, 0);
+ radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
+ radeon_emit(cs, 0);
}
/*
- *with DCC some colors don't require CMASK elimiation before being
+ * With DCC some colors don't require CMASK elimination before being
* used as a texture. This sets a predicate value to determine if the
* cmask eliminate is required.
*/
uint64_t va = radv_buffer_get_va(image->bo);
va += image->offset + image->dcc_pred_offset;
- assert(image->surface.dcc_size);
+ assert(radv_image_has_dcc(image));
radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
radeon_emit(cmd_buffer->cs, pred_val >> 32);
}
-void
-radv_set_color_clear_regs(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image,
- int idx,
- uint32_t color_values[2])
+/**
+ * Update the fast clear color values if the image is bound as a color buffer.
+ */
+static void
+radv_update_bound_fast_clear_color(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image,
+ int cb_idx,
+ uint32_t color_values[2])
+{
+ struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
+ const struct radv_subpass *subpass = cmd_buffer->state.subpass;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
+ struct radv_attachment_info *att;
+ uint32_t att_idx;
+
+ if (!framebuffer || !subpass)
+ return;
+
+ att_idx = subpass->color_attachments[cb_idx].attachment;
+ if (att_idx == VK_ATTACHMENT_UNUSED)
+ return;
+
+ att = &framebuffer->attachments[att_idx];
+ if (att->attachment->image != image)
+ return;
+
+ radeon_set_context_reg_seq(cs, R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c, 2);
+ radeon_emit(cs, color_values[0]);
+ radeon_emit(cs, color_values[1]);
+}
+
+/**
+ * Set the clear color values to the image's metadata.
+ */
+static void
+radv_set_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image,
+ uint32_t color_values[2])
{
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint64_t va = radv_buffer_get_va(image->bo);
+
va += image->offset + image->clear_value_offset;
- assert(image->cmask.size || image->surface.dcc_size);
+ assert(radv_image_has_cmask(image) || radv_image_has_dcc(image));
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
- radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
- S_370_WR_CONFIRM(1) |
- S_370_ENGINE_SEL(V_370_PFP));
- radeon_emit(cmd_buffer->cs, va);
- radeon_emit(cmd_buffer->cs, va >> 32);
- radeon_emit(cmd_buffer->cs, color_values[0]);
- radeon_emit(cmd_buffer->cs, color_values[1]);
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 4, 0));
+ radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
+ S_370_WR_CONFIRM(1) |
+ S_370_ENGINE_SEL(V_370_PFP));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, color_values[0]);
+ radeon_emit(cs, color_values[1]);
+}
- radeon_set_context_reg_seq(cmd_buffer->cs, R_028C8C_CB_COLOR0_CLEAR_WORD0 + idx * 0x3c, 2);
- radeon_emit(cmd_buffer->cs, color_values[0]);
- radeon_emit(cmd_buffer->cs, color_values[1]);
+/**
+ * Update the clear color values for this image.
+ */
+void
+radv_update_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image,
+ int cb_idx,
+ uint32_t color_values[2])
+{
+ assert(radv_image_has_cmask(image) || radv_image_has_dcc(image));
+
+ radv_set_color_clear_metadata(cmd_buffer, image, color_values);
+
+ radv_update_bound_fast_clear_color(cmd_buffer, image, cb_idx,
+ color_values);
}
+/**
+ * Load the clear color values from the image's metadata.
+ */
static void
-radv_load_color_clear_regs(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image,
- int idx)
+radv_load_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image,
+ int cb_idx)
{
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint64_t va = radv_buffer_get_va(image->bo);
+
va += image->offset + image->clear_value_offset;
- if (!image->cmask.size && !image->surface.dcc_size)
+ if (!radv_image_has_cmask(image) && !radv_image_has_dcc(image))
return;
- uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + idx * 0x3c;
+ uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c;
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, cmd_buffer->state.predicating));
- radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
- COPY_DATA_DST_SEL(COPY_DATA_REG) |
- COPY_DATA_COUNT_SEL);
- radeon_emit(cmd_buffer->cs, va);
- radeon_emit(cmd_buffer->cs, va >> 32);
- radeon_emit(cmd_buffer->cs, reg >> 2);
- radeon_emit(cmd_buffer->cs, 0);
+ radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, cmd_buffer->state.predicating));
+ radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
+ COPY_DATA_DST_SEL(COPY_DATA_REG) |
+ COPY_DATA_COUNT_SEL);
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, reg >> 2);
+ radeon_emit(cs, 0);
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating));
- radeon_emit(cmd_buffer->cs, 0);
+ radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating));
+ radeon_emit(cs, 0);
}
static void
struct radv_image *image = att->attachment->image;
VkImageLayout layout = subpass->color_attachments[i].layout;
- radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo, 8);
+ radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo);
assert(att->attachment->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT);
radv_emit_fb_color_state(cmd_buffer, i, att, image, layout);
- radv_load_color_clear_regs(cmd_buffer, image, i);
+ radv_load_color_clear_metadata(cmd_buffer, image, i);
}
if(subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
VkImageLayout layout = subpass->depth_stencil_attachment.layout;
struct radv_attachment_info *att = &framebuffer->attachments[idx];
struct radv_image *image = att->attachment->image;
- radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo, 8);
+ radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo);
MAYBE_UNUSED uint32_t queue_mask = radv_image_queue_family_mask(image,
cmd_buffer->queue_family_index,
cmd_buffer->queue_family_index);
cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
cmd_buffer->state.offset_scale = att->ds.offset_scale;
}
- radv_load_depth_clear_regs(cmd_buffer, image);
+ radv_load_ds_clear_metadata(cmd_buffer, image);
} else {
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 2);
static void
radv_emit_index_buffer(struct radv_cmd_buffer *cmd_buffer)
{
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
struct radv_cmd_state *state = &cmd_buffer->state;
if (state->index_type != state->last_index_type) {
void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer)
{
+ bool has_perfect_queries = cmd_buffer->state.perfect_occlusion_queries_enabled;
+ struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ uint32_t pa_sc_mode_cntl_1 =
+ pipeline ? pipeline->graphics.ms.pa_sc_mode_cntl_1 : 0;
uint32_t db_count_control;
if(!cmd_buffer->state.active_occlusion_queries) {
if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
- db_count_control = 0;
- } else {
- db_count_control = S_028004_ZPASS_INCREMENT_DISABLE(1);
+ if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) &&
+ pipeline->graphics.disable_out_of_order_rast_for_occlusion &&
+ has_perfect_queries) {
+ /* Re-enable out-of-order rasterization if the
+ * bound pipeline supports it and if it has been
+ * disabled before starting any perfect occlusion
+ * queries.
+ */
+ radeon_set_context_reg(cmd_buffer->cs,
+ R_028A4C_PA_SC_MODE_CNTL_1,
+ pa_sc_mode_cntl_1);
+ }
}
+ db_count_control = S_028004_ZPASS_INCREMENT_DISABLE(1);
} else {
+ const struct radv_subpass *subpass = cmd_buffer->state.subpass;
+ uint32_t sample_rate = subpass ? util_logbase2(subpass->max_sample_count) : 0;
+
if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
- db_count_control = S_028004_PERFECT_ZPASS_COUNTS(1) |
- S_028004_SAMPLE_RATE(0) | /* TODO: set this to the number of samples of the current framebuffer */
+ db_count_control =
+ S_028004_PERFECT_ZPASS_COUNTS(has_perfect_queries) |
+ S_028004_SAMPLE_RATE(sample_rate) |
S_028004_ZPASS_ENABLE(1) |
S_028004_SLICE_EVEN_ENABLE(1) |
S_028004_SLICE_ODD_ENABLE(1);
+
+ if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) &&
+ pipeline->graphics.disable_out_of_order_rast_for_occlusion &&
+ has_perfect_queries) {
+ /* If the bound pipeline has enabled
+ * out-of-order rasterization, we should
+ * disable it before starting any perfect
+ * occlusion queries.
+ */
+ pa_sc_mode_cntl_1 &= C_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE;
+
+ radeon_set_context_reg(cmd_buffer->cs,
+ R_028A4C_PA_SC_MODE_CNTL_1,
+ pa_sc_mode_cntl_1);
+ }
} else {
db_count_control = S_028004_PERFECT_ZPASS_COUNTS(1) |
- S_028004_SAMPLE_RATE(0); /* TODO: set this to the number of samples of the current framebuffer */
+ S_028004_SAMPLE_RATE(sample_rate);
}
}
if (states & (RADV_CMD_DIRTY_DYNAMIC_VIEWPORT))
radv_emit_viewport(cmd_buffer);
- if (states & (RADV_CMD_DIRTY_DYNAMIC_SCISSOR | RADV_CMD_DIRTY_DYNAMIC_VIEWPORT))
+ if (states & (RADV_CMD_DIRTY_DYNAMIC_SCISSOR | RADV_CMD_DIRTY_DYNAMIC_VIEWPORT) &&
+ !cmd_buffer->device->physical_device->has_scissor_bug)
radv_emit_scissor(cmd_buffer);
if (states & RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)
cmd_buffer->state.dirty &= ~states;
}
-static void
-emit_stage_descriptor_set_userdata(struct radv_cmd_buffer *cmd_buffer,
- struct radv_pipeline *pipeline,
- int idx,
- uint64_t va,
- gl_shader_stage stage)
-{
- struct ac_userdata_info *desc_set_loc = &pipeline->shaders[stage]->info.user_sgprs_locs.descriptor_sets[idx];
- uint32_t base_reg = pipeline->user_data_0[stage];
-
- if (desc_set_loc->sgpr_idx == -1 || desc_set_loc->indirect)
- return;
-
- assert(!desc_set_loc->indirect);
- assert(desc_set_loc->num_sgprs == 2);
- radeon_set_sh_reg_seq(cmd_buffer->cs,
- base_reg + desc_set_loc->sgpr_idx * 4, 2);
- radeon_emit(cmd_buffer->cs, va);
- radeon_emit(cmd_buffer->cs, va >> 32);
-}
-
-static void
-radv_emit_descriptor_set_userdata(struct radv_cmd_buffer *cmd_buffer,
- VkShaderStageFlags stages,
- struct radv_descriptor_set *set,
- unsigned idx)
-{
- if (cmd_buffer->state.pipeline) {
- radv_foreach_stage(stage, stages) {
- if (cmd_buffer->state.pipeline->shaders[stage])
- emit_stage_descriptor_set_userdata(cmd_buffer, cmd_buffer->state.pipeline,
- idx, set->va,
- stage);
- }
- }
-
- if (cmd_buffer->state.compute_pipeline && (stages & VK_SHADER_STAGE_COMPUTE_BIT))
- emit_stage_descriptor_set_userdata(cmd_buffer, cmd_buffer->state.compute_pipeline,
- idx, set->va,
- MESA_SHADER_COMPUTE);
-}
-
static void
radv_flush_push_descriptors(struct radv_cmd_buffer *cmd_buffer,
VkPipelineBindPoint bind_point)
VK_PIPELINE_BIND_POINT_GRAPHICS;
struct radv_descriptor_state *descriptors_state =
radv_get_descriptors_state(cmd_buffer, bind_point);
- unsigned i;
if (!descriptors_state->dirty)
return;
cmd_buffer->cs,
MAX_SETS * MESA_SHADER_STAGES * 4);
- for_each_bit(i, descriptors_state->dirty) {
- struct radv_descriptor_set *set = descriptors_state->sets[i];
- if (!(descriptors_state->valid & (1u << i)))
- continue;
+ if (cmd_buffer->state.pipeline) {
+ radv_foreach_stage(stage, stages) {
+ if (!cmd_buffer->state.pipeline->shaders[stage])
+ continue;
- radv_emit_descriptor_set_userdata(cmd_buffer, stages, set, i);
+ radv_emit_descriptor_pointers(cmd_buffer,
+ cmd_buffer->state.pipeline,
+ descriptors_state, stage);
+ }
}
+
+ if (cmd_buffer->state.compute_pipeline &&
+ (stages & VK_SHADER_STAGE_COMPUTE_BIT)) {
+ radv_emit_descriptor_pointers(cmd_buffer,
+ cmd_buffer->state.compute_pipeline,
+ descriptors_state,
+ MESA_SHADER_COMPUTE);
+ }
+
descriptors_state->dirty = 0;
descriptors_state->push_dirty = false;
+ assert(cmd_buffer->cs->cdw <= cdw_max);
+
if (unlikely(cmd_buffer->device->trace_bo))
radv_save_descriptors(cmd_buffer, bind_point);
-
- assert(cmd_buffer->cs->cdw <= cdw_max);
}
static void
radv_flush_constants(struct radv_cmd_buffer *cmd_buffer,
- struct radv_pipeline *pipeline,
VkShaderStageFlags stages)
{
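+ /* Select the pipeline and descriptor state that match the requested stages. */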
+ struct radv_pipeline *pipeline = stages & VK_SHADER_STAGE_COMPUTE_BIT
+ ? cmd_buffer->state.compute_pipeline
+ : cmd_buffer->state.pipeline;
+ VkPipelineBindPoint bind_point = stages & VK_SHADER_STAGE_COMPUTE_BIT ?
+ VK_PIPELINE_BIND_POINT_COMPUTE :
+ VK_PIPELINE_BIND_POINT_GRAPHICS;
+ struct radv_descriptor_state *descriptors_state =
+ radv_get_descriptors_state(cmd_buffer, bind_point);
struct radv_pipeline_layout *layout = pipeline->layout;
+ struct radv_shader_variant *shader, *prev_shader;
unsigned offset;
void *ptr;
uint64_t va;
return;
memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size);
- memcpy((char*)ptr + layout->push_constant_size, cmd_buffer->dynamic_buffers,
+ memcpy((char*)ptr + layout->push_constant_size,
+ descriptors_state->dynamic_buffers,
16 * layout->dynamic_offset_count);
va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
cmd_buffer->cs, MESA_SHADER_STAGES * 4);
+ prev_shader = NULL;
radv_foreach_stage(stage, stages) {
- if (pipeline->shaders[stage]) {
+ shader = radv_get_shader(pipeline, stage);
+
+ /* Avoid redundantly emitting the address for merged stages. */
+ if (shader && shader != prev_shader) {
radv_emit_userdata_address(cmd_buffer, pipeline, stage,
AC_UD_PUSH_CONSTANTS, va);
+
+ prev_shader = shader;
}
}
assert(cmd_buffer->cs->cdw <= cdw_max);
}
-static bool
-radv_cmd_buffer_update_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer, bool pipeline_is_dirty)
+static void
+radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer,
+ bool pipeline_is_dirty)
{
if ((pipeline_is_dirty ||
(cmd_buffer->state.dirty & RADV_CMD_DIRTY_VERTEX_BUFFER)) &&
cmd_buffer->state.pipeline->vertex_elements.count &&
- radv_get_vertex_shader(cmd_buffer->state.pipeline)->info.info.vs.has_vertex_buffers) {
+ radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.has_vertex_buffers) {
struct radv_vertex_elements_info *velems = &cmd_buffer->state.pipeline->vertex_elements;
unsigned vb_offset;
void *vb_ptr;
/* allocate some descriptor state for vertex buffers */
if (!radv_cmd_buffer_upload_alloc(cmd_buffer, count * 16, 256,
&vb_offset, &vb_ptr))
- return false;
+ return;
for (i = 0; i < count; i++) {
uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4];
cmd_buffer->state.vb_va = va;
cmd_buffer->state.vb_size = count * 16;
- cmd_buffer->state.vb_prefetch_dirty = true;
+ cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_VBO_DESCRIPTORS;
}
cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_VERTEX_BUFFER;
-
- return true;
}
-static bool
+static void
radv_upload_graphics_shader_descriptors(struct radv_cmd_buffer *cmd_buffer, bool pipeline_is_dirty)
{
- if (!radv_cmd_buffer_update_vertex_descriptors(cmd_buffer, pipeline_is_dirty))
- return false;
-
+ radv_flush_vertex_descriptors(cmd_buffer, pipeline_is_dirty);
radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
- radv_flush_constants(cmd_buffer, cmd_buffer->state.pipeline,
- VK_SHADER_STAGE_ALL_GRAPHICS);
-
- return true;
+ radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
}
static void
{
struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
struct radv_cmd_state *state = &cmd_buffer->state;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint32_t ia_multi_vgt_param;
int32_t primitive_reset_en;
cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH;
}
- if (src_stage_mask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
- VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
- VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
- VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
+ if (src_stage_mask & (VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
} else if (src_stage_mask & (VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
- VK_PIPELINE_STAGE_VERTEX_SHADER_BIT)) {
+ VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
+ VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
+ VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
+ VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT)) {
cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VS_PARTIAL_FLUSH;
}
}
static enum radv_cmd_flush_bits
radv_src_access_flush(struct radv_cmd_buffer *cmd_buffer,
- VkAccessFlags src_flags)
+ VkAccessFlags src_flags,
+ struct radv_image *image)
{
+ bool flush_CB_meta = true, flush_DB_meta = true;
enum radv_cmd_flush_bits flush_bits = 0;
uint32_t b;
+
+ if (image) {
+ if (!radv_image_has_CB_metadata(image))
+ flush_CB_meta = false;
+ if (!radv_image_has_htile(image))
+ flush_DB_meta = false;
+ }
+
for_each_bit(b, src_flags) {
switch ((VkAccessFlagBits)(1 << b)) {
case VK_ACCESS_SHADER_WRITE_BIT:
flush_bits |= RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
break;
case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
- flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
- RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
+ if (flush_CB_meta)
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
break;
case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
- flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
- RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB;
+ if (flush_DB_meta)
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
break;
case VK_ACCESS_TRANSFER_WRITE_BIT:
flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
- RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
RADV_CMD_FLAG_FLUSH_AND_INV_DB |
- RADV_CMD_FLAG_FLUSH_AND_INV_DB_META |
RADV_CMD_FLAG_INV_GLOBAL_L2;
+
+ if (flush_CB_meta)
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
+ if (flush_DB_meta)
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
break;
default:
break;
VkAccessFlags dst_flags,
struct radv_image *image)
{
+ bool flush_CB_meta = true, flush_DB_meta = true;
enum radv_cmd_flush_bits flush_bits = 0;
+ bool flush_CB = true, flush_DB = true;
uint32_t b;
+
+ if (image) {
+ if (!(image->usage & VK_IMAGE_USAGE_STORAGE_BIT)) {
+ flush_CB = false;
+ flush_DB = false;
+ }
+
+ if (!radv_image_has_CB_metadata(image))
+ flush_CB_meta = false;
+ if (!radv_image_has_htile(image))
+ flush_DB_meta = false;
+ }
+
for_each_bit(b, dst_flags) {
switch ((VkAccessFlagBits)(1 << b)) {
case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
RADV_CMD_FLAG_INV_GLOBAL_L2;
break;
case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
- /* TODO: change to image && when the image gets passed
- * through from the subpass. */
- if (!image || (image->usage & VK_IMAGE_USAGE_STORAGE_BIT))
- flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
- RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
+ if (flush_CB)
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
+ if (flush_CB_meta)
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
break;
case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT:
- if (!image || (image->usage & VK_IMAGE_USAGE_STORAGE_BIT))
- flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
- RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
+ if (flush_DB)
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB;
+ if (flush_DB_meta)
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
break;
default:
break;
return flush_bits;
}
-static void radv_subpass_barrier(struct radv_cmd_buffer *cmd_buffer, const struct radv_subpass_barrier *barrier)
+void radv_subpass_barrier(struct radv_cmd_buffer *cmd_buffer,
+ const struct radv_subpass_barrier *barrier)
{
- cmd_buffer->state.flush_bits |= radv_src_access_flush(cmd_buffer, barrier->src_access_mask);
+ cmd_buffer->state.flush_bits |= radv_src_access_flush(cmd_buffer, barrier->src_access_mask,
+ NULL);
radv_stage_flush(cmd_buffer, barrier->src_stage_mask);
cmd_buffer->state.flush_bits |= radv_dst_access_flush(cmd_buffer, barrier->dst_access_mask,
NULL);
}
static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buffer,
- VkAttachmentReference att)
+ struct radv_subpass_attachment att)
{
unsigned idx = att.attachment;
struct radv_image_view *view = cmd_buffer->state.framebuffer->attachments[idx].attachment;
struct radv_device *device = cmd_buffer->device;
if (device->gfx_init) {
uint64_t va = radv_buffer_get_va(device->gfx_init);
- radv_cs_add_buffer(device->ws, cmd_buffer->cs, device->gfx_init, 8);
+ radv_cs_add_buffer(device->ws, cmd_buffer->cs, device->gfx_init);
radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
radeon_emit(cmd_buffer->cs, va);
radeon_emit(cmd_buffer->cs, va >> 32);
}
}
- if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
+ if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
+ (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
assert(pBeginInfo->pInheritanceInfo);
cmd_buffer->state.framebuffer = radv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
cmd_buffer->state.pass = radv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
radv_cmd_buffer_set_subpass(cmd_buffer, subpass, false);
}
- if (unlikely(cmd_buffer->device->trace_bo))
+ if (unlikely(cmd_buffer->device->trace_bo)) {
+ struct radv_device *device = cmd_buffer->device;
+
+ radv_cs_add_buffer(device->ws, cmd_buffer->cs,
+ device->trace_bo);
+
radv_cmd_buffer_trace_emit(cmd_buffer);
+ }
cmd_buffer->status = RADV_CMD_BUFFER_STATUS_RECORDING;
vb[idx].offset = pOffsets[i];
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
- vb[idx].buffer->bo, 8);
+ vb[idx].buffer->bo);
}
if (!changed) {
int index_size_shift = cmd_buffer->state.index_type ? 2 : 1;
cmd_buffer->state.max_index_count = (index_buffer->size - offset) >> index_size_shift;
cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
- radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, index_buffer->bo, 8);
+ radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, index_buffer->bo);
}
struct radeon_winsys *ws = cmd_buffer->device->ws;
radv_set_descriptor_set(cmd_buffer, bind_point, set, idx);
- if (!set)
- return;
+ assert(set);
assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
- for (unsigned j = 0; j < set->layout->buffer_count; ++j)
- if (set->descriptors[j])
- radv_cs_add_buffer(ws, cmd_buffer->cs, set->descriptors[j], 7);
+ if (!cmd_buffer->device->use_global_bo_list) {
+ for (unsigned j = 0; j < set->layout->buffer_count; ++j)
+ if (set->descriptors[j])
+ radv_cs_add_buffer(ws, cmd_buffer->cs, set->descriptors[j]);
+ }
if(set->bo)
- radv_cs_add_buffer(ws, cmd_buffer->cs, set->bo, 8);
+ radv_cs_add_buffer(ws, cmd_buffer->cs, set->bo);
}
void radv_CmdBindDescriptorSets(
RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
unsigned dyn_idx = 0;
+ const bool no_dynamic_bounds = cmd_buffer->device->instance->debug_flags & RADV_DEBUG_NO_DYNAMIC_BOUNDS;
+ struct radv_descriptor_state *descriptors_state =
+ radv_get_descriptors_state(cmd_buffer, pipelineBindPoint);
+
for (unsigned i = 0; i < descriptorSetCount; ++i) {
unsigned idx = i + firstSet;
RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);
for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start;
- uint32_t *dst = cmd_buffer->dynamic_buffers + idx * 4;
+ uint32_t *dst = descriptors_state->dynamic_buffers + idx * 4;
assert(dyn_idx < dynamicOffsetCount);
struct radv_descriptor_range *range = set->dynamic_descriptors + j;
uint64_t va = range->va + pDynamicOffsets[dyn_idx];
dst[0] = va;
dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
- dst[2] = range->size;
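+ /* RADV_DEBUG_NO_DYNAMIC_BOUNDS replaces the real range size with ~0
+ * to disable bounds checking on dynamic buffer descriptors.
+ */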
+ dst[2] = no_dynamic_bounds ? 0xffffffffu : range->size;
dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
si_emit_cache_flush(cmd_buffer);
}
+ /* Make sure CP DMA is idle at the end of IBs because the kernel
+ * doesn't wait for it.
+ */
+ si_cp_dma_wait_for_idle(cmd_buffer);
+
vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs))
- return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ return vk_error(cmd_buffer->device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
cmd_buffer->status = RADV_CMD_BUFFER_STATUS_EXECUTABLE;
MAX2(cmd_buffer->compute_scratch_size_needed,
pipeline->max_waves * pipeline->scratch_bytes_per_wave);
+ radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
+ pipeline->shaders[MESA_SHADER_COMPUTE]->bo);
+
if (unlikely(cmd_buffer->device->trace_bo))
radv_save_pipeline(cmd_buffer, pipeline, RING_COMPUTE);
}
cmd_buffer->state.last_first_instance = -1;
cmd_buffer->state.last_vertex_offset = -1;
+ /* Prefetch all pipeline shaders at first draw time. */
+ cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_SHADERS;
+
radv_bind_dynamic_state(cmd_buffer, &pipeline->dynamic_state);
if (pipeline->graphics.esgs_ring_size > cmd_buffer->esgs_ring_size_needed)
if (radv_pipeline_has_tess(pipeline))
cmd_buffer->tess_rings_needed = true;
-
- if (radv_pipeline_has_gs(pipeline)) {
- struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_GEOMETRY,
- AC_UD_SCRATCH_RING_OFFSETS);
- if (cmd_buffer->ring_offsets_idx == -1)
- cmd_buffer->ring_offsets_idx = loc->sgpr_idx;
- else if (loc->sgpr_idx != -1)
- assert(loc->sgpr_idx == cmd_buffer->ring_offsets_idx);
- }
break;
default:
assert(!"invalid bind point");
assert(firstViewport < MAX_VIEWPORTS);
assert(total_count >= 1 && total_count <= MAX_VIEWPORTS);
- if (cmd_buffer->device->physical_device->has_scissor_bug) {
- /* Try to skip unnecessary PS partial flushes when the viewports
- * don't change.
- */
- if (!(state->dirty & (RADV_CMD_DIRTY_DYNAMIC_VIEWPORT |
- RADV_CMD_DIRTY_DYNAMIC_SCISSOR)) &&
- !memcmp(state->dynamic.viewport.viewports + firstViewport,
- pViewports, viewportCount * sizeof(*pViewports))) {
- return;
- }
- }
-
memcpy(state->dynamic.viewport.viewports + firstViewport, pViewports,
viewportCount * sizeof(*pViewports));
assert(firstScissor < MAX_SCISSORS);
assert(total_count >= 1 && total_count <= MAX_SCISSORS);
- if (cmd_buffer->device->physical_device->has_scissor_bug) {
- /* Try to skip unnecessary PS partial flushes when the scissors
- * don't change.
- */
- if (!(state->dirty & (RADV_CMD_DIRTY_DYNAMIC_VIEWPORT |
- RADV_CMD_DIRTY_DYNAMIC_SCISSOR)) &&
- !memcmp(state->dynamic.scissor.scissors + firstScissor,
- pScissors, scissorCount * sizeof(*pScissors))) {
- return;
- }
- }
-
memcpy(state->dynamic.scissor.scissors + firstScissor, pScissors,
scissorCount * sizeof(*pScissors));
if (secondary->sample_positions_needed)
primary->sample_positions_needed = true;
- if (secondary->ring_offsets_idx != -1) {
- if (primary->ring_offsets_idx == -1)
- primary->ring_offsets_idx = secondary->ring_offsets_idx;
- else
- assert(secondary->ring_offsets_idx == primary->ring_offsets_idx);
- }
primary->device->ws->cs_execute_secondary(primary->cs, secondary->cs);
secondary->state.last_ia_multi_vgt_param;
}
- if (secondary->state.last_first_instance != -1) {
- primary->state.last_first_instance =
- secondary->state.last_first_instance;
- }
-
- if (secondary->state.last_num_instances != -1) {
- primary->state.last_num_instances =
- secondary->state.last_num_instances;
- }
-
- if (secondary->state.last_vertex_offset != -1) {
- primary->state.last_vertex_offset =
- secondary->state.last_vertex_offset;
- }
+ primary->state.last_first_instance = secondary->state.last_first_instance;
+ primary->state.last_num_instances = secondary->state.last_num_instances;
+ primary->state.last_vertex_offset = secondary->state.last_vertex_offset;
if (secondary->state.last_index_type != -1) {
primary->state.last_index_type =
pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pool == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (pAllocator)
pool->alloc = *pAllocator;
radv_cmd_buffer_clear_subpass(cmd_buffer);
}
+void radv_CmdBeginRenderPass2KHR(
+ VkCommandBuffer commandBuffer,
+ const VkRenderPassBeginInfo* pRenderPassBeginInfo,
+ const VkSubpassBeginInfoKHR* pSubpassBeginInfo)
+{
+ radv_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
+ pSubpassBeginInfo->contents);
+}
+
void radv_CmdNextSubpass(
VkCommandBuffer commandBuffer,
VkSubpassContents contents)
radv_cmd_buffer_clear_subpass(cmd_buffer);
}
+void radv_CmdNextSubpass2KHR(
+ VkCommandBuffer commandBuffer,
+ const VkSubpassBeginInfoKHR* pSubpassBeginInfo,
+ const VkSubpassEndInfoKHR* pSubpassEndInfo)
+{
+ radv_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
+}
+
static void radv_emit_view_index(struct radv_cmd_buffer *cmd_buffer, unsigned index)
{
struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
for (unsigned stage = 0; stage < MESA_SHADER_STAGES; ++stage) {
- if (!pipeline->shaders[stage])
+ if (!radv_get_shader(pipeline, stage))
continue;
- struct ac_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, AC_UD_VIEW_INDEX);
+
+ struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, AC_UD_VIEW_INDEX);
if (loc->sgpr_idx == -1)
continue;
uint32_t base_reg = pipeline->user_data_0[stage];
}
if (pipeline->gs_copy_shader) {
- struct ac_userdata_info *loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_VIEW_INDEX];
+ struct radv_userdata_info *loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_VIEW_INDEX];
if (loc->sgpr_idx != -1) {
uint32_t base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0;
radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index);
uint64_t index_va,
uint32_t index_count)
{
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_2, 4, false));
+ radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_2, 4, cmd_buffer->state.predicating));
radeon_emit(cmd_buffer->cs, cmd_buffer->state.max_index_count);
radeon_emit(cmd_buffer->cs, index_va);
radeon_emit(cmd_buffer->cs, index_va >> 32);
uint64_t count_va,
uint32_t stride)
{
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA
: V_0287F0_DI_SRC_SEL_AUTO_INDEX;
- bool draw_id_enable = radv_get_vertex_shader(cmd_buffer->state.pipeline)->info.info.vs.needs_draw_id;
+ bool draw_id_enable = radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.needs_draw_id;
uint32_t base_reg = cmd_buffer->state.pipeline->graphics.vtx_base_sgpr;
+ bool predicating = cmd_buffer->state.predicating;
assert(base_reg);
/* just reset draw state for vertex data */
if (draw_count == 1 && !count_va && !draw_id_enable) {
radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT :
- PKT3_DRAW_INDIRECT, 3, false));
+ PKT3_DRAW_INDIRECT, 3, predicating));
radeon_emit(cs, 0);
radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
} else {
radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
PKT3_DRAW_INDIRECT_MULTI,
- 8, false));
+ 8, predicating));
radeon_emit(cs, 0);
radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
{
struct radv_cmd_state *state = &cmd_buffer->state;
struct radeon_winsys *ws = cmd_buffer->device->ws;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
if (info->indirect) {
uint64_t va = radv_buffer_get_va(info->indirect->bo);
va += info->indirect->offset + info->indirect_offset;
- radv_cs_add_buffer(ws, cs, info->indirect->bo, 8);
+ radv_cs_add_buffer(ws, cs, info->indirect->bo);
radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
radeon_emit(cs, 1);
count_va += info->count_buffer->offset +
info->count_buffer_offset;
- radv_cs_add_buffer(ws, cs, info->count_buffer->bo, 8);
+ radv_cs_add_buffer(ws, cs, info->count_buffer->bo);
}
if (!state->subpass->view_mask) {
}
if (state->last_num_instances != info->instance_count) {
- radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, state->predicating));
+ radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, false));
radeon_emit(cs, info->instance_count);
state->last_num_instances = info->instance_count;
}
}
}
+/*
+ * Vega and Raven have a bug which triggers if there are multiple context
+ * register contexts active at the same time with different scissor values.
+ *
+ * There are two possible workarounds:
+ * 1) Wait for PS_PARTIAL_FLUSH every time the scissor is changed. That way
+ * there is only ever one active set of scissor values at the same time.
+ *
+ * 2) Whenever the hardware switches contexts we have to set the scissor
+ * registers again even if it is a no-op. That way the new context gets
+ * the correct scissor values.
+ *
+ * This implements option 2. radv_need_late_scissor_emission needs to
+ * return true on affected HW if radv_emit_all_graphics_states sets
+ * any context registers.
+ */
+static bool radv_need_late_scissor_emission(struct radv_cmd_buffer *cmd_buffer,
+ bool indexed_draw)
+{
+ struct radv_cmd_state *state = &cmd_buffer->state;
+
+ if (!cmd_buffer->device->physical_device->has_scissor_bug)
+ return false;
+
+ uint32_t used_states = cmd_buffer->state.pipeline->graphics.needed_dynamic_state | ~RADV_CMD_DIRTY_DYNAMIC_ALL;
+
+ /* Index and vertex buffers don't change context regs, and the pipeline
+ * is handled later. */
+ used_states &= ~(RADV_CMD_DIRTY_INDEX_BUFFER | RADV_CMD_DIRTY_VERTEX_BUFFER | RADV_CMD_DIRTY_PIPELINE);
+
+ /* Assume all remaining dirty states can imply context rolls. */
+ if (cmd_buffer->state.dirty & used_states)
+ return true;
+
+ if (cmd_buffer->state.emitted_pipeline != cmd_buffer->state.pipeline)
+ return true;
+
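+ /* The primitive restart index is a context register, so re-emitting it
+ * in radv_emit_draw_registers() also rolls the context. */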
+ if (indexed_draw && state->pipeline->graphics.prim_restart_enable &&
+ (state->index_type ? 0xffffffffu : 0xffffu) != state->last_primitive_reset_index)
+ return true;
+
+ return false;
+}
+
static void
radv_emit_all_graphics_states(struct radv_cmd_buffer *cmd_buffer,
const struct radv_draw_info *info)
{
+ bool late_scissor_emission = radv_need_late_scissor_emission(cmd_buffer, info->indexed);
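+ /* This must be evaluated before any dirty bits are cleared below; the
+ * scissor is then re-emitted last, after everything that may have
+ * rolled the context. */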
+
+ if ((cmd_buffer->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER) ||
+ cmd_buffer->state.emitted_pipeline != cmd_buffer->state.pipeline)
+ radv_emit_rbplus_state(cmd_buffer);
+
if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE)
radv_emit_graphics_pipeline(cmd_buffer);
radv_emit_draw_registers(cmd_buffer, info->indexed,
info->instance_count > 1, info->indirect,
info->indirect ? 0 : info->count);
+
+ if (late_scissor_emission)
+ radv_emit_scissor(cmd_buffer);
}
static void
radv_draw(struct radv_cmd_buffer *cmd_buffer,
const struct radv_draw_info *info)
{
+ bool has_prefetch =
+ cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
bool pipeline_is_dirty =
(cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) &&
- cmd_buffer->state.pipeline &&
cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline;
MAYBE_UNUSED unsigned cdw_max =
si_emit_cache_flush(cmd_buffer);
/* <-- CUs are idle here --> */
- if (!radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty))
- return;
+ radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty);
radv_emit_draw_packets(cmd_buffer, info);
/* <-- CUs are busy here --> */
* run in parallel, but starting the draw first is more
* important.
*/
- if (pipeline_is_dirty) {
- radv_emit_prefetch(cmd_buffer,
- cmd_buffer->state.pipeline);
+ if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
+ radv_emit_prefetch_L2(cmd_buffer,
+ cmd_buffer->state.pipeline, false);
}
} else {
/* If we don't wait for idle, start prefetches first, then set
*/
si_emit_cache_flush(cmd_buffer);
- if (pipeline_is_dirty) {
- radv_emit_prefetch(cmd_buffer,
- cmd_buffer->state.pipeline);
+ if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
+ /* Only prefetch the vertex shader and VBO descriptors
+ * in order to start the draw as soon as possible.
+ */
+ radv_emit_prefetch_L2(cmd_buffer,
+ cmd_buffer->state.pipeline, true);
}
- if (!radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty))
- return;
+ radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty);
radv_emit_all_graphics_states(cmd_buffer, info);
radv_emit_draw_packets(cmd_buffer, info);
+
+ /* Prefetch the remaining shaders after the draw has been
+ * started.
+ */
+ if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
+ radv_emit_prefetch_L2(cmd_buffer,
+ cmd_buffer->state.pipeline, false);
+ }
}
assert(cmd_buffer->cs->cdw <= cdw_max);
radv_draw(cmd_buffer, &info);
}
+void radv_CmdDrawIndirectCountKHR(
+ VkCommandBuffer commandBuffer,
+ VkBuffer _buffer,
+ VkDeviceSize offset,
+ VkBuffer _countBuffer,
+ VkDeviceSize countBufferOffset,
+ uint32_t maxDrawCount,
+ uint32_t stride)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
+ RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
+ struct radv_draw_info info = {};
+
+ info.count = maxDrawCount;
+ info.indirect = buffer;
+ info.indirect_offset = offset;
+ info.count_buffer = count_buffer;
+ info.count_buffer_offset = countBufferOffset;
+ info.stride = stride;
+
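+ /* maxDrawCount is only an upper bound; the CP reads the actual draw
+ * count from count_buffer at execution time. */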
+ radv_draw(cmd_buffer, &info);
+}
+
+void radv_CmdDrawIndexedIndirectCountKHR(
+ VkCommandBuffer commandBuffer,
+ VkBuffer _buffer,
+ VkDeviceSize offset,
+ VkBuffer _countBuffer,
+ VkDeviceSize countBufferOffset,
+ uint32_t maxDrawCount,
+ uint32_t stride)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
+ RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
+ struct radv_draw_info info = {};
+
+ info.indexed = true;
+ info.count = maxDrawCount;
+ info.indirect = buffer;
+ info.indirect_offset = offset;
+ info.count_buffer = count_buffer;
+ info.count_buffer_offset = countBufferOffset;
+ info.stride = stride;
+
+ radv_draw(cmd_buffer, &info);
+}
+
struct radv_dispatch_info {
/**
* Determine the layout of the grid (in block units) to be used.
*/
uint32_t blocks[3];
+ /**
+ * A starting offset for the grid. If unaligned is set, the offsets
+ * must still be aligned to the shader's block size.
+ */
+ uint32_t offsets[3];
/**
* Whether it's an unaligned compute dispatch.
*/
struct radv_shader_variant *compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
unsigned dispatch_initiator = cmd_buffer->device->dispatch_initiator;
struct radeon_winsys *ws = cmd_buffer->device->ws;
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
- struct ac_userdata_info *loc;
+ bool predicating = cmd_buffer->state.predicating;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
+ struct radv_userdata_info *loc;
loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_COMPUTE,
AC_UD_CS_GRID_SIZE);
va += info->indirect->offset + info->indirect_offset;
- radv_cs_add_buffer(ws, cs, info->indirect->bo, 8);
+ radv_cs_add_buffer(ws, cs, info->indirect->bo);
if (loc->sgpr_idx != -1) {
for (unsigned i = 0; i < 3; ++i) {
}
if (radv_cmd_buffer_uses_mec(cmd_buffer)) {
- radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 2, 0) |
+ radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 2, predicating) |
PKT3_SHADER_TYPE_S(1));
radeon_emit(cs, va);
radeon_emit(cs, va >> 32);
radeon_emit(cs, va);
radeon_emit(cs, va >> 32);
- radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, 0) |
+ radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, predicating) |
PKT3_SHADER_TYPE_S(1));
radeon_emit(cs, 0);
radeon_emit(cs, dispatch_initiator);
}
} else {
unsigned blocks[3] = { info->blocks[0], info->blocks[1], info->blocks[2] };
+ unsigned offsets[3] = { info->offsets[0], info->offsets[1], info->offsets[2] };
if (info->unaligned) {
unsigned *cs_block_size = compute_shader->info.cs.block_size;
blocks[1] = round_up_u32(blocks[1], cs_block_size[1]);
blocks[2] = round_up_u32(blocks[2], cs_block_size[2]);
+ for (unsigned i = 0; i < 3; ++i) {
+ assert(offsets[i] % cs_block_size[i] == 0);
+ offsets[i] /= cs_block_size[i];
+ }
+
radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
radeon_emit(cs,
S_00B81C_NUM_THREAD_FULL(cs_block_size[0]) |
radeon_emit(cs, blocks[2]);
}
- radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, 0) |
+ if (offsets[0] || offsets[1] || offsets[2]) {
+ radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
+ radeon_emit(cs, offsets[0]);
+ radeon_emit(cs, offsets[1]);
+ radeon_emit(cs, offsets[2]);
+
+ /* The blocks in the packet are not counts but end block positions. */
+ for (unsigned i = 0; i < 3; ++i)
+ blocks[i] += offsets[i];
+ } else {
+ dispatch_initiator |= S_00B800_FORCE_START_AT_000(1);
+ }
+
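+ /* Worked example (hypothetical numbers): vkCmdDispatchBase with base
+ * (4, 0, 0) and count (8, 1, 1) sets COMPUTE_START_X = 4 and emits an
+ * X end position of 4 + 8 = 12 in the packet below. */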
+ radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, predicating) |
PKT3_SHADER_TYPE_S(1));
radeon_emit(cs, blocks[0]);
radeon_emit(cs, blocks[1]);
radv_upload_compute_shader_descriptors(struct radv_cmd_buffer *cmd_buffer)
{
radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_COMPUTE_BIT);
- radv_flush_constants(cmd_buffer, cmd_buffer->state.compute_pipeline,
- VK_SHADER_STAGE_COMPUTE_BIT);
+ radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_COMPUTE_BIT);
}
static void
const struct radv_dispatch_info *info)
{
struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+ bool has_prefetch =
+ cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
bool pipeline_is_dirty = pipeline &&
pipeline != cmd_buffer->state.emitted_compute_pipeline;
* will run in parallel, but starting the dispatch first is
* more important.
*/
- if (pipeline_is_dirty) {
+ if (has_prefetch && pipeline_is_dirty) {
radv_emit_shader_prefetch(cmd_buffer,
pipeline->shaders[MESA_SHADER_COMPUTE]);
}
*/
si_emit_cache_flush(cmd_buffer);
- if (pipeline_is_dirty) {
+ if (has_prefetch && pipeline_is_dirty) {
radv_emit_shader_prefetch(cmd_buffer,
pipeline->shaders[MESA_SHADER_COMPUTE]);
}
radv_cmd_buffer_after_draw(cmd_buffer, RADV_CMD_FLAG_CS_PARTIAL_FLUSH);
}
-void radv_CmdDispatch(
+void radv_CmdDispatchBase(
VkCommandBuffer commandBuffer,
+ uint32_t base_x,
+ uint32_t base_y,
+ uint32_t base_z,
uint32_t x,
uint32_t y,
uint32_t z)
info.blocks[1] = y;
info.blocks[2] = z;
+ info.offsets[0] = base_x;
+ info.offsets[1] = base_y;
+ info.offsets[2] = base_z;
radv_dispatch(cmd_buffer, &info);
}
+void radv_CmdDispatch(
+ VkCommandBuffer commandBuffer,
+ uint32_t x,
+ uint32_t y,
+ uint32_t z)
+{
+ radv_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
+}
+
void radv_CmdDispatchIndirect(
VkCommandBuffer commandBuffer,
VkBuffer _buffer,
for (unsigned i = 0; i < cmd_buffer->state.framebuffer->attachment_count; ++i) {
VkImageLayout layout = cmd_buffer->state.pass->attachments[i].final_layout;
radv_handle_subpass_image_transition(cmd_buffer,
- (VkAttachmentReference){i, layout});
+ (struct radv_subpass_attachment){i, layout});
}
vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
cmd_buffer->state.framebuffer = NULL;
}
+void radv_CmdEndRenderPass2KHR(
+ VkCommandBuffer commandBuffer,
+ const VkSubpassEndInfoKHR* pSubpassEndInfo)
+{
+ radv_CmdEndRenderPass(commandBuffer);
+}
+
/*
* For HTILE we have the following interesting clear words:
* 0xfffff30f: Uncompressed, full depth range, for depth+stencil HTILE
assert(range->levelCount == 1 || range->levelCount == VK_REMAINING_ARRAY_LAYERS);
unsigned layer_count = radv_get_layerCount(image, range);
uint64_t size = image->surface.htile_slice_size * layer_count;
+ VkImageAspectFlags aspects = VK_IMAGE_ASPECT_DEPTH_BIT;
uint64_t offset = image->offset + image->htile_offset +
image->surface.htile_slice_size * range->baseArrayLayer;
struct radv_cmd_state *state = &cmd_buffer->state;
+ VkClearDepthStencilValue value = {};
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
size, clear_word);
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
+
+ if (vk_format_is_stencil(image->vk_format))
+ aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
+
+ radv_set_ds_clear_metadata(cmd_buffer, image, value, aspects);
}
static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffer,
const VkImageSubresourceRange *range,
VkImageAspectFlags pending_clears)
{
- if (dst_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL &&
- (pending_clears & vk_format_aspects(image->vk_format)) == vk_format_aspects(image->vk_format) &&
- cmd_buffer->state.render_area.offset.x == 0 && cmd_buffer->state.render_area.offset.y == 0 &&
- cmd_buffer->state.render_area.extent.width == image->info.width &&
- cmd_buffer->state.render_area.extent.height == image->info.height) {
- /* The clear will initialize htile. */
+ if (!radv_image_has_htile(image))
return;
- } else if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED &&
+
+ if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED &&
radv_layout_has_htile(image, dst_layout, dst_queue_mask)) {
/* TODO: merge with the clear if applicable */
radv_initialize_htile(cmd_buffer, image, range, 0);
}
}
-void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image, uint32_t value)
+static void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image, uint32_t value)
{
struct radv_cmd_state *state = &cmd_buffer->state;
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
- state->flush_bits |= radv_fill_buffer(cmd_buffer, image->bo,
- image->offset + image->cmask.offset,
- image->cmask.size, value);
+ state->flush_bits |= radv_clear_cmask(cmd_buffer, image, value);
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
}
-static void radv_handle_cmask_image_transition(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image,
- VkImageLayout src_layout,
- VkImageLayout dst_layout,
- unsigned src_queue_mask,
- unsigned dst_queue_mask,
- const VkImageSubresourceRange *range)
-{
- if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
- if (image->fmask.size)
- radv_initialise_cmask(cmd_buffer, image, 0xccccccccu);
- else
- radv_initialise_cmask(cmd_buffer, image, 0xffffffffu);
- } else if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
- !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
- radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
- }
-}
-
void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer,
struct radv_image *image, uint32_t value)
{
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
- state->flush_bits |= radv_fill_buffer(cmd_buffer, image->bo,
- image->offset + image->dcc_offset,
- image->surface.dcc_size, value);
+ state->flush_bits |= radv_clear_dcc(cmd_buffer, image, value);
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
}
-static void radv_handle_dcc_image_transition(struct radv_cmd_buffer *cmd_buffer,
- struct radv_image *image,
- VkImageLayout src_layout,
- VkImageLayout dst_layout,
- unsigned src_queue_mask,
- unsigned dst_queue_mask,
- const VkImageSubresourceRange *range)
+/**
+ * Initialize DCC/FMASK/CMASK metadata for a color image.
+ */
+static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image,
+ VkImageLayout src_layout,
+ VkImageLayout dst_layout,
+ unsigned src_queue_mask,
+ unsigned dst_queue_mask)
+{
+ if (radv_image_has_cmask(image)) {
+ uint32_t value = 0xffffffffu; /* Fully expanded mode. */
+
+ /* TODO: clarify this. */
+ if (radv_image_has_fmask(image)) {
+ value = 0xccccccccu;
+ }
+
+ radv_initialise_cmask(cmd_buffer, image, value);
+ }
+
+ if (radv_image_has_dcc(image)) {
+ uint32_t value = 0xffffffffu; /* Fully expanded mode. */
+
+ if (radv_layout_dcc_compressed(image, dst_layout,
+ dst_queue_mask)) {
+ value = 0x20202020u;
+ }
+
+ radv_initialize_dcc(cmd_buffer, image, value);
+
+ radv_set_dcc_need_cmask_elim_pred(cmd_buffer, image, false);
+ }
+
+ if (radv_image_has_cmask(image) || radv_image_has_dcc(image)) {
+ uint32_t color_values[2] = {};
+ radv_set_color_clear_metadata(cmd_buffer, image, color_values);
+ }
+}
+
+/**
+ * Handle color image transitions for DCC/FMASK/CMASK.
+ */
+static void radv_handle_color_image_transition(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_image *image,
+ VkImageLayout src_layout,
+ VkImageLayout dst_layout,
+ unsigned src_queue_mask,
+ unsigned dst_queue_mask,
+ const VkImageSubresourceRange *range)
{
- if (src_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
- radv_initialize_dcc(cmd_buffer, image, 0xffffffffu);
- } else if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
- radv_initialize_dcc(cmd_buffer, image,
- radv_layout_dcc_compressed(image, dst_layout, dst_queue_mask) ?
- 0x20202020u : 0xffffffffu);
- } else if (radv_layout_dcc_compressed(image, src_layout, src_queue_mask) &&
- !radv_layout_dcc_compressed(image, dst_layout, dst_queue_mask)) {
- radv_decompress_dcc(cmd_buffer, image, range);
- } else if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
- !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
- radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
+ if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
+ radv_init_color_image_metadata(cmd_buffer, image,
+ src_layout, dst_layout,
+ src_queue_mask, dst_queue_mask);
+ return;
+ }
+
+ if (radv_image_has_dcc(image)) {
+ if (src_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
+ radv_initialize_dcc(cmd_buffer, image, 0xffffffffu);
+ } else if (radv_layout_dcc_compressed(image, src_layout, src_queue_mask) &&
+ !radv_layout_dcc_compressed(image, dst_layout, dst_queue_mask)) {
+ radv_decompress_dcc(cmd_buffer, image, range);
+ } else if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
+ !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
+ radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
+ }
+ } else if (radv_image_has_cmask(image) || radv_image_has_fmask(image)) {
+ if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
+ !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
+ radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
+ }
}
}
return;
}
- unsigned src_queue_mask = radv_image_queue_family_mask(image, src_family, cmd_buffer->queue_family_index);
- unsigned dst_queue_mask = radv_image_queue_family_mask(image, dst_family, cmd_buffer->queue_family_index);
-
- if (image->surface.htile_size)
- radv_handle_depth_image_transition(cmd_buffer, image, src_layout,
- dst_layout, src_queue_mask,
- dst_queue_mask, range,
- pending_clears);
-
- if (image->cmask.size || image->fmask.size)
- radv_handle_cmask_image_transition(cmd_buffer, image, src_layout,
- dst_layout, src_queue_mask,
- dst_queue_mask, range);
-
- if (image->surface.dcc_size)
- radv_handle_dcc_image_transition(cmd_buffer, image, src_layout,
- dst_layout, src_queue_mask,
- dst_queue_mask, range);
+ unsigned src_queue_mask =
+ radv_image_queue_family_mask(image, src_family,
+ cmd_buffer->queue_family_index);
+ unsigned dst_queue_mask =
+ radv_image_queue_family_mask(image, dst_family,
+ cmd_buffer->queue_family_index);
+
+ if (vk_format_is_depth(image->vk_format)) {
+ radv_handle_depth_image_transition(cmd_buffer, image,
+ src_layout, dst_layout,
+ src_queue_mask, dst_queue_mask,
+ range, pending_clears);
+ } else {
+ radv_handle_color_image_transition(cmd_buffer, image,
+ src_layout, dst_layout,
+ src_queue_mask, dst_queue_mask,
+ range);
+ }
}
-void radv_CmdPipelineBarrier(
- VkCommandBuffer commandBuffer,
- VkPipelineStageFlags srcStageMask,
- VkPipelineStageFlags destStageMask,
- VkBool32 byRegion,
- uint32_t memoryBarrierCount,
- const VkMemoryBarrier* pMemoryBarriers,
- uint32_t bufferMemoryBarrierCount,
- const VkBufferMemoryBarrier* pBufferMemoryBarriers,
- uint32_t imageMemoryBarrierCount,
- const VkImageMemoryBarrier* pImageMemoryBarriers)
+struct radv_barrier_info {
+ uint32_t eventCount;
+ const VkEvent *pEvents;
+ VkPipelineStageFlags srcStageMask;
+};
+
+static void
+radv_barrier(struct radv_cmd_buffer *cmd_buffer,
+ uint32_t memoryBarrierCount,
+ const VkMemoryBarrier *pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount,
+ const VkBufferMemoryBarrier *pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier *pImageMemoryBarriers,
+ const struct radv_barrier_info *info)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
enum radv_cmd_flush_bits src_flush_bits = 0;
enum radv_cmd_flush_bits dst_flush_bits = 0;
+ for (unsigned i = 0; i < info->eventCount; ++i) {
+ RADV_FROM_HANDLE(radv_event, event, info->pEvents[i]);
+ uint64_t va = radv_buffer_get_va(event->bo);
+
+ radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);
+
+ MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
+
+ si_emit_wait_fence(cs, va, 1, 0xffffffff);
+ assert(cmd_buffer->cs->cdw <= cdw_max);
+ }
+
for (uint32_t i = 0; i < memoryBarrierCount; i++) {
- src_flush_bits |= radv_src_access_flush(cmd_buffer, pMemoryBarriers[i].srcAccessMask);
+ src_flush_bits |= radv_src_access_flush(cmd_buffer, pMemoryBarriers[i].srcAccessMask,
+ NULL);
dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pMemoryBarriers[i].dstAccessMask,
NULL);
}
for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
- src_flush_bits |= radv_src_access_flush(cmd_buffer, pBufferMemoryBarriers[i].srcAccessMask);
+ src_flush_bits |= radv_src_access_flush(cmd_buffer, pBufferMemoryBarriers[i].srcAccessMask,
+ NULL);
dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pBufferMemoryBarriers[i].dstAccessMask,
NULL);
}
for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
- src_flush_bits |= radv_src_access_flush(cmd_buffer, pImageMemoryBarriers[i].srcAccessMask);
+
+ src_flush_bits |= radv_src_access_flush(cmd_buffer, pImageMemoryBarriers[i].srcAccessMask,
+ image);
dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pImageMemoryBarriers[i].dstAccessMask,
image);
}
- radv_stage_flush(cmd_buffer, srcStageMask);
+ radv_stage_flush(cmd_buffer, info->srcStageMask);
cmd_buffer->state.flush_bits |= src_flush_bits;
for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
0);
}
+ /* Make sure CP DMA is idle because the driver might have performed a
+ * DMA operation for copying or filling buffers/images.
+ */
+ si_cp_dma_wait_for_idle(cmd_buffer);
+
cmd_buffer->state.flush_bits |= dst_flush_bits;
}
+void radv_CmdPipelineBarrier(
+ VkCommandBuffer commandBuffer,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags destStageMask,
+ VkBool32 byRegion,
+ uint32_t memoryBarrierCount,
+ const VkMemoryBarrier* pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount,
+ const VkBufferMemoryBarrier* pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier* pImageMemoryBarriers)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ struct radv_barrier_info info;
+
+ info.eventCount = 0;
+ info.pEvents = NULL;
+ info.srcStageMask = srcStageMask;
+
+ radv_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
+ bufferMemoryBarrierCount, pBufferMemoryBarriers,
+ imageMemoryBarrierCount, pImageMemoryBarriers, &info);
+}
+
static void write_event(struct radv_cmd_buffer *cmd_buffer,
struct radv_event *event,
VkPipelineStageFlags stageMask,
unsigned value)
{
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint64_t va = radv_buffer_get_va(event->bo);
- radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo, 8);
+ radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 18);
- /* TODO: this is overkill. Probably should figure something out from
- * the stage mask. */
+ /* Flags that only require a top-of-pipe event. */
+ VkPipelineStageFlags top_of_pipe_flags =
+ VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+
+ /* Flags that only require a post-index-fetch event. */
+ VkPipelineStageFlags post_index_fetch_flags =
+ top_of_pipe_flags |
+ VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
+ VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
+
+ /* Make sure CP DMA is idle because the driver might have performed a
+ * DMA operation for copying or filling buffers/images.
+ */
+ si_cp_dma_wait_for_idle(cmd_buffer);
+
+ /* TODO: Emit EOS events for syncing PS/CS stages. */
- si_cs_emit_write_event_eop(cs,
- cmd_buffer->state.predicating,
- cmd_buffer->device->physical_device->rad_info.chip_class,
- radv_cmd_buffer_uses_mec(cmd_buffer),
- V_028A90_BOTTOM_OF_PIPE_TS, 0,
- 1, va, 2, value);
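+ /* Path selection example (hypothetical masks): a stageMask of only
+ * VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT takes the ME write below, while
+ * a mask that includes e.g. VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT
+ * falls through to the bottom-of-pipe EOP event. */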
+ if (!(stageMask & ~top_of_pipe_flags)) {
+ /* Just need to sync the PFP engine. */
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
+ radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
+ S_370_WR_CONFIRM(1) |
+ S_370_ENGINE_SEL(V_370_PFP));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, value);
+ } else if (!(stageMask & ~post_index_fetch_flags)) {
+ /* Sync ME because PFP reads index and indirect buffers. */
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
+ radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
+ S_370_WR_CONFIRM(1) |
+ S_370_ENGINE_SEL(V_370_ME));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, value);
+ } else {
+ /* Otherwise, sync all prior GPU work using an EOP event. */
+ si_cs_emit_write_event_eop(cs,
+ cmd_buffer->device->physical_device->rad_info.chip_class,
+ radv_cmd_buffer_uses_mec(cmd_buffer),
+ V_028A90_BOTTOM_OF_PIPE_TS, 0,
+ EOP_DATA_SEL_VALUE_32BIT, va, 2, value,
+ cmd_buffer->gfx9_eop_bug_va);
+ }
assert(cmd_buffer->cs->cdw <= cdw_max);
}
const VkImageMemoryBarrier* pImageMemoryBarriers)
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- struct radeon_winsys_cs *cs = cmd_buffer->cs;
+ struct radv_barrier_info info;
- for (unsigned i = 0; i < eventCount; ++i) {
- RADV_FROM_HANDLE(radv_event, event, pEvents[i]);
- uint64_t va = radv_buffer_get_va(event->bo);
+ info.eventCount = eventCount;
+ info.pEvents = pEvents;
+ info.srcStageMask = 0;
- radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo, 8);
+ radv_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
+ bufferMemoryBarrierCount, pBufferMemoryBarriers,
+ imageMemoryBarrierCount, pImageMemoryBarriers, &info);
+}
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
- si_emit_wait_fence(cs, false, va, 1, 0xffffffff);
- assert(cmd_buffer->cs->cdw <= cdw_max);
- }
+void radv_CmdSetDeviceMask(VkCommandBuffer commandBuffer,
+ uint32_t deviceMask)
+{
+ /* No-op */
+}
+/* VK_EXT_conditional_rendering */
+void radv_CmdBeginConditionalRenderingEXT(
+ VkCommandBuffer commandBuffer,
+ const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ RADV_FROM_HANDLE(radv_buffer, buffer, pConditionalRenderingBegin->buffer);
+ bool inverted;
+ uint64_t va;
- for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
- RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
+ va = radv_buffer_get_va(buffer->bo) + pConditionalRenderingBegin->offset;
- radv_handle_image_transition(cmd_buffer, image,
- pImageMemoryBarriers[i].oldLayout,
- pImageMemoryBarriers[i].newLayout,
- pImageMemoryBarriers[i].srcQueueFamilyIndex,
- pImageMemoryBarriers[i].dstQueueFamilyIndex,
- &pImageMemoryBarriers[i].subresourceRange,
- 0);
- }
+ inverted = pConditionalRenderingBegin->flags & VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT;
+
+ /* Enable predication for this command buffer. */
+ si_emit_set_predication_state(cmd_buffer, inverted, va);
+ cmd_buffer->state.predicating = true;
+
+ /* Store conditional rendering user info. */
+ cmd_buffer->state.predication_type = inverted;
+ cmd_buffer->state.predication_va = va;
+}
+
+void radv_CmdEndConditionalRenderingEXT(
+ VkCommandBuffer commandBuffer)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+
+ /* Disable predication for this command buffer. */
+ si_emit_set_predication_state(cmd_buffer, false, 0);
+ cmd_buffer->state.predicating = false;
- /* TODO: figure out how to do memory barriers without waiting */
- cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER |
- RADV_CMD_FLAG_INV_GLOBAL_L2 |
- RADV_CMD_FLAG_INV_VMEM_L1 |
- RADV_CMD_FLAG_INV_SMEM_L1;
+ /* Reset conditional rendering user info. */
+ cmd_buffer->state.predication_type = -1;
+ cmd_buffer->state.predication_va = 0;
}
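
For reference, a minimal application-side sketch of how these entry points are
driven (hypothetical handles; the struct, flag and function names follow the
VK_EXT_conditional_rendering spec, not this patch):

    VkConditionalRenderingBeginInfoEXT begin = {
        .sType = VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT,
        .buffer = predicate_buffer, /* 32-bit predicate value at 'offset' */
        .offset = 0,
        .flags = 0, /* or VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT */
    };
    vkCmdBeginConditionalRenderingEXT(cmd, &begin);
    vkCmdDraw(cmd, 3, 1, 0, 0); /* discarded when the predicate is zero */
    vkCmdEndConditionalRenderingEXT(cmd);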