},
};
-void
+static void
radv_dynamic_state_copy(struct radv_dynamic_state *dest,
const struct radv_dynamic_state *src,
uint32_t copy_mask)
{
+ /* Make sure to copy the number of viewports/scissors because they can
+ * only be specified at pipeline creation time.
+ */
+ dest->viewport.count = src->viewport.count;
+ dest->scissor.count = src->scissor.count;
+
if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
- dest->viewport.count = src->viewport.count;
typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
src->viewport.count);
}
if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
- dest->scissor.count = src->scissor.count;
typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
src->scissor.count);
}
static void
radv_cmd_buffer_after_draw(struct radv_cmd_buffer *cmd_buffer)
{
- if (cmd_buffer->device->debug_flags & RADV_DEBUG_SYNC_SHADERS) {
+ if (cmd_buffer->device->instance->debug_flags & RADV_DEBUG_SYNC_SHADERS) {
enum radv_cmd_flush_bits flags;
/* Force wait for graphics/compute engines to be idle. */
radeon_set_context_reg(cmd_buffer->cs, R_028010_DB_RENDER_OVERRIDE2, ds->db_render_override2);
}
-/* 12.4 fixed-point */
-static unsigned radv_pack_float_12p4(float x)
-{
- return x <= 0 ? 0 :
- x >= 4096 ? 0xffff : x * 16;
-}
-
struct ac_userdata_info *
radv_lookup_user_sgpr(struct radv_pipeline *pipeline,
gl_shader_stage stage,
int idx)
{
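+ /* On GFX9 the vertex shader may be merged into the TCS or GS, in
+ * which case its user SGPR info lives in the merged stage.
+ */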
+ if (stage == MESA_SHADER_VERTEX) {
+ if (pipeline->shaders[MESA_SHADER_VERTEX])
+ return &pipeline->shaders[MESA_SHADER_VERTEX]->info.user_sgprs_locs.shader_data[idx];
+ if (pipeline->shaders[MESA_SHADER_TESS_CTRL])
+ return &pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.user_sgprs_locs.shader_data[idx];
+ if (pipeline->shaders[MESA_SHADER_GEOMETRY])
+ return &pipeline->shaders[MESA_SHADER_GEOMETRY]->info.user_sgprs_locs.shader_data[idx];
+ }
return &pipeline->shaders[stage]->info.user_sgprs_locs.shader_data[idx];
}
int idx, uint64_t va)
{
struct ac_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx);
- uint32_t base_reg = radv_shader_stage_to_user_data_0(stage, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
+ uint32_t base_reg = radv_shader_stage_to_user_data_0(stage, cmd_buffer->device->physical_device->rad_info.chip_class, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
if (loc->sgpr_idx == -1)
return;
assert(loc->num_sgprs == 2);
radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_mask[0]);
radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_mask[1]);
- radeon_set_context_reg(cmd_buffer->cs, CM_R_028804_DB_EQAA, ms->db_eqaa);
- radeon_set_context_reg(cmd_buffer->cs, EG_R_028A4C_PA_SC_MODE_CNTL_1, ms->pa_sc_mode_cntl_1);
+ radeon_set_context_reg(cmd_buffer->cs, R_028804_DB_EQAA, ms->db_eqaa);
+ radeon_set_context_reg(cmd_buffer->cs, R_028A4C_PA_SC_MODE_CNTL_1, ms->pa_sc_mode_cntl_1);
if (old_pipeline && num_samples == old_pipeline->graphics.ms.num_samples)
return;
- radeon_set_context_reg_seq(cmd_buffer->cs, CM_R_028BDC_PA_SC_LINE_CNTL, 2);
+ radeon_set_context_reg_seq(cmd_buffer->cs, R_028BDC_PA_SC_LINE_CNTL, 2);
radeon_emit(cmd_buffer->cs, ms->pa_sc_line_cntl);
radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_config);
if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.needs_sample_positions) {
uint32_t offset;
struct ac_userdata_info *loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_FRAGMENT, AC_UD_PS_SAMPLE_POS_OFFSET);
- uint32_t base_reg = radv_shader_stage_to_user_data_0(MESA_SHADER_FRAGMENT, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
+ uint32_t base_reg = radv_shader_stage_to_user_data_0(MESA_SHADER_FRAGMENT, cmd_buffer->device->physical_device->rad_info.chip_class, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
if (loc->sgpr_idx == -1)
return;
assert(loc->num_sgprs == 1);
radeon_set_context_reg(cmd_buffer->cs, R_028810_PA_CL_CLIP_CNTL,
raster->pa_cl_clip_cntl);
-
radeon_set_context_reg(cmd_buffer->cs, R_0286D4_SPI_INTERP_CONTROL_0,
raster->spi_interp_control);
-
- radeon_set_context_reg_seq(cmd_buffer->cs, R_028A00_PA_SU_POINT_SIZE, 2);
- unsigned tmp = (unsigned)(1.0 * 8.0);
- radeon_emit(cmd_buffer->cs, S_028A00_HEIGHT(tmp) | S_028A00_WIDTH(tmp));
- radeon_emit(cmd_buffer->cs, S_028A04_MIN_SIZE(radv_pack_float_12p4(0)) |
- S_028A04_MAX_SIZE(radv_pack_float_12p4(8192/2))); /* R_028A04_PA_SU_POINT_MINMAX */
-
radeon_set_context_reg(cmd_buffer->cs, R_028BE4_PA_SU_VTX_CNTL,
raster->pa_su_vtx_cntl);
-
radeon_set_context_reg(cmd_buffer->cs, R_028814_PA_SU_SC_MODE_CNTL,
raster->pa_su_sc_mode_cntl);
}
{
struct radv_shader_variant *vs;
- assert (pipeline->shaders[MESA_SHADER_VERTEX]);
+ radeon_set_context_reg(cmd_buffer->cs, R_028A84_VGT_PRIMITIVEID_EN, pipeline->graphics.vgt_primitiveid_en);
+ /* Skip shaders merged into HS/GS */
vs = pipeline->shaders[MESA_SHADER_VERTEX];
+ if (!vs)
+ return;
if (vs->info.vs.as_ls)
radv_emit_hw_ls(cmd_buffer, vs);
else if (vs->info.vs.as_es)
radv_emit_hw_es(cmd_buffer, vs, &vs->info.vs.es_info);
else
radv_emit_hw_vs(cmd_buffer, pipeline, vs, &vs->info.vs.outinfo);
-
- radeon_set_context_reg(cmd_buffer->cs, R_028A84_VGT_PRIMITIVEID_EN, pipeline->graphics.vgt_primitiveid_en);
}
loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_TESS_CTRL, AC_UD_TCS_OFFCHIP_LAYOUT);
if (loc->sgpr_idx != -1) {
- uint32_t base_reg = radv_shader_stage_to_user_data_0(MESA_SHADER_TESS_CTRL, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
+ uint32_t base_reg = radv_shader_stage_to_user_data_0(MESA_SHADER_TESS_CTRL, cmd_buffer->device->physical_device->rad_info.chip_class, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
assert(loc->num_sgprs == 4);
assert(!loc->indirect);
radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, 4);
loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_TESS_EVAL, AC_UD_TES_OFFCHIP_LAYOUT);
if (loc->sgpr_idx != -1) {
- uint32_t base_reg = radv_shader_stage_to_user_data_0(MESA_SHADER_TESS_EVAL, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
+ uint32_t base_reg = radv_shader_stage_to_user_data_0(MESA_SHADER_TESS_EVAL, cmd_buffer->device->physical_device->rad_info.chip_class, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
assert(loc->num_sgprs == 1);
assert(!loc->indirect);
loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_VERTEX, AC_UD_VS_LS_TCS_IN_LAYOUT);
if (loc->sgpr_idx != -1) {
- uint32_t base_reg = radv_shader_stage_to_user_data_0(MESA_SHADER_VERTEX, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
+ uint32_t base_reg = radv_shader_stage_to_user_data_0(MESA_SHADER_VERTEX, cmd_buffer->device->physical_device->rad_info.chip_class, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
assert(loc->num_sgprs == 1);
assert(!loc->indirect);
}
}
-static void polaris_set_vgt_vertex_reuse(struct radv_cmd_buffer *cmd_buffer,
- struct radv_pipeline *pipeline)
+static void
+radv_emit_vgt_vertex_reuse(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_pipeline *pipeline)
{
- uint32_t vtx_reuse_depth = 30;
+ struct radeon_winsys_cs *cs = cmd_buffer->cs;
+
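+ /* Tuning the VGT vertex reuse depth only applies to Polaris and newer. */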
if (cmd_buffer->device->physical_device->rad_info.family < CHIP_POLARIS10)
return;
- if (pipeline->shaders[MESA_SHADER_TESS_EVAL]) {
- if (pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.spacing == TESS_SPACING_FRACTIONAL_ODD)
- vtx_reuse_depth = 14;
- }
- radeon_set_context_reg(cmd_buffer->cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
- vtx_reuse_depth);
+ radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
+ pipeline->graphics.vtx_reuse_depth);
}
static void
radv_emit_tess_shaders(cmd_buffer, pipeline);
radv_emit_geometry_shader(cmd_buffer, pipeline);
radv_emit_fragment_shader(cmd_buffer, pipeline);
- polaris_set_vgt_vertex_reuse(cmd_buffer, pipeline);
+ radv_emit_vgt_vertex_reuse(cmd_buffer, pipeline);
cmd_buffer->scratch_size_needed =
MAX2(cmd_buffer->scratch_size_needed,
radv_emit_scissor(struct radv_cmd_buffer *cmd_buffer)
{
uint32_t count = cmd_buffer->state.dynamic.scissor.count;
+
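+ /* GFX9 scissor bug workaround: force a PS partial flush before the
+ * scissor registers are changed.
+ */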
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
+ cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
+ si_emit_cache_flush(cmd_buffer);
+ }
si_write_scissors(cmd_buffer->cs, 0, count,
cmd_buffer->state.dynamic.scissor.scissors,
cmd_buffer->state.dynamic.viewport.viewports,
}
}
+static void
+radv_emit_index_buffer(struct radv_cmd_buffer *cmd_buffer)
+{
+ struct radeon_winsys_cs *cs = cmd_buffer->cs;
+
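+ /* On GFX9 the index type is programmed through the VGT_INDEX_TYPE
+ * uconfig register instead of a separate INDEX_TYPE packet.
+ */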
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
+ radeon_set_uconfig_reg_idx(cs, R_03090C_VGT_INDEX_TYPE,
+ 2, cmd_buffer->state.index_type);
+ } else {
+ radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
+ radeon_emit(cs, cmd_buffer->state.index_type);
+ }
+
+ radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
+ radeon_emit(cs, cmd_buffer->state.index_va);
+ radeon_emit(cs, cmd_buffer->state.index_va >> 32);
+
+ radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
+ radeon_emit(cs, cmd_buffer->state.max_index_count);
+}
+
void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer)
{
uint32_t db_count_control;
if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_PIPELINE |
RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS))
radv_emit_depth_biais(cmd_buffer);
-
- cmd_buffer->state.dirty = 0;
}
static void
gl_shader_stage stage)
{
struct ac_userdata_info *desc_set_loc = &pipeline->shaders[stage]->info.user_sgprs_locs.descriptor_sets[idx];
- uint32_t base_reg = radv_shader_stage_to_user_data_0(stage, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
+ uint32_t base_reg = radv_shader_stage_to_user_data_0(stage, cmd_buffer->device->physical_device->rad_info.chip_class, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
if (desc_set_loc->sgpr_idx == -1 || desc_set_loc->indirect)
return;
if ((cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline || cmd_buffer->state.vb_dirty) &&
cmd_buffer->state.pipeline->vertex_elements.count &&
- cmd_buffer->state.pipeline->shaders[MESA_SHADER_VERTEX]->info.info.vs.has_vertex_buffers) {
+ radv_get_vertex_shader(cmd_buffer->state.pipeline)->info.info.vs.has_vertex_buffers) {
struct radv_vertex_elements_info *velems = &cmd_buffer->state.pipeline->vertex_elements;
unsigned vb_offset;
void *vb_ptr;
if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_RENDER_TARGETS)
radv_emit_framebuffer_state(cmd_buffer);
+ if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_INDEX_BUFFER)
+ radv_emit_index_buffer(cmd_buffer);
+
ia_multi_vgt_param = si_get_ia_multi_vgt_param(cmd_buffer, instanced_draw, indirect_draw, draw_vertex_count);
if (cmd_buffer->state.last_ia_multi_vgt_param != ia_multi_vgt_param) {
if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
assert(cmd_buffer->cs->cdw <= cdw_max);
si_emit_cache_flush(cmd_buffer);
+
+ cmd_buffer->state.dirty = 0;
}
static void radv_stage_flush(struct radv_cmd_buffer *cmd_buffer,
}
if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
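+ /* pInheritanceInfo is required for secondary command buffers. */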
+ assert(pBeginInfo->pInheritanceInfo);
cmd_buffer->state.framebuffer = radv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
cmd_buffer->state.pass = radv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
- radv_mark_descriptor_sets_dirty(cmd_buffer);
-
switch (pipelineBindPoint) {
case VK_PIPELINE_BIND_POINT_COMPUTE:
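+ /* Binding the same pipeline again is a no-op, and skipping it
+ * avoids dirtying all descriptor sets.
+ */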
+ if (cmd_buffer->state.compute_pipeline == pipeline)
+ return;
+ radv_mark_descriptor_sets_dirty(cmd_buffer);
+
cmd_buffer->state.compute_pipeline = pipeline;
cmd_buffer->push_constant_stages |= VK_SHADER_STAGE_COMPUTE_BIT;
break;
case VK_PIPELINE_BIND_POINT_GRAPHICS:
+ if (cmd_buffer->state.pipeline == pipeline)
+ return;
+ radv_mark_descriptor_sets_dirty(cmd_buffer);
+
cmd_buffer->state.pipeline = pipeline;
if (!pipeline)
break;
{
RADV_FROM_HANDLE(radv_cmd_buffer, primary, commandBuffer);
+ assert(commandBufferCount > 0);
+
/* Emit pending flushes on primary prior to executing secondary */
si_emit_cache_flush(primary);
}
primary->device->ws->cs_execute_secondary(primary->cs, secondary->cs);
- primary->state.emitted_pipeline = secondary->state.emitted_pipeline;
- primary->state.emitted_compute_pipeline = secondary->state.emitted_compute_pipeline;
- primary->state.last_primitive_reset_en = secondary->state.last_primitive_reset_en;
- primary->state.last_primitive_reset_index = secondary->state.last_primitive_reset_index;
- }
- /* if we execute secondary we need to mark some stuff to reset dirty */
- if (commandBufferCount) {
- primary->state.dirty |= RADV_CMD_DIRTY_PIPELINE;
- primary->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_ALL;
- radv_mark_descriptor_sets_dirty(primary);
+ /* When the secondary command buffer is compute-only, we don't
+ * need to re-emit the current graphics pipeline.
+ */
+ if (secondary->state.emitted_pipeline) {
+ primary->state.emitted_pipeline =
+ secondary->state.emitted_pipeline;
+ }
+
+ /* When the secondary command buffer is graphics-only, we don't
+ * need to re-emit the current compute pipeline.
+ */
+ if (secondary->state.emitted_compute_pipeline) {
+ primary->state.emitted_compute_pipeline =
+ secondary->state.emitted_compute_pipeline;
+ }
+
+ /* Copy back the draw state tracking so the primary only
+ * re-emits these packets when needed.
+ */
+ if (secondary->state.last_primitive_reset_en != -1) {
+ primary->state.last_primitive_reset_en =
+ secondary->state.last_primitive_reset_en;
+ }
+
+ if (secondary->state.last_primitive_reset_index) {
+ primary->state.last_primitive_reset_index =
+ secondary->state.last_primitive_reset_index;
+ }
+
+ if (secondary->state.last_ia_multi_vgt_param) {
+ primary->state.last_ia_multi_vgt_param =
+ secondary->state.last_ia_multi_vgt_param;
+ }
}
+
+ /* After executing commands from secondary buffers we have to
+ * mark some states dirty so they are re-emitted on the primary.
+ */
+ primary->state.dirty |= RADV_CMD_DIRTY_PIPELINE |
+ RADV_CMD_DIRTY_INDEX_BUFFER |
+ RADV_CMD_DIRTY_DYNAMIC_ALL;
+ radv_mark_descriptor_sets_dirty(primary);
}
VkResult radv_CreateCommandPool(
struct ac_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, AC_UD_VIEW_INDEX);
if (loc->sgpr_idx == -1)
continue;
- uint32_t base_reg = radv_shader_stage_to_user_data_0(stage, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
+ uint32_t base_reg = radv_shader_stage_to_user_data_0(stage, cmd_buffer->device->physical_device->rad_info.chip_class, radv_pipeline_has_gs(pipeline), radv_pipeline_has_tess(pipeline));
radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index);
}
radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_2, 4, false));
radeon_emit(cmd_buffer->cs, cmd_buffer->state.max_index_count);
radeon_emit(cmd_buffer->cs, index_va);
- radeon_emit(cmd_buffer->cs, (index_va >> 32UL) & 0xFF);
+ radeon_emit(cmd_buffer->cs, index_va >> 32);
radeon_emit(cmd_buffer->cs, index_count);
radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_DMA);
}
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 26 * MAX_VIEWS);
- if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
- radeon_set_uconfig_reg_idx(cmd_buffer->cs, R_03090C_VGT_INDEX_TYPE,
- 2, cmd_buffer->state.index_type);
- } else {
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
- radeon_emit(cmd_buffer->cs, cmd_buffer->state.index_type);
- }
-
assert(cmd_buffer->state.pipeline->graphics.vtx_base_sgpr);
radeon_set_sh_reg_seq(cmd_buffer->cs, cmd_buffer->state.pipeline->graphics.vtx_base_sgpr,
cmd_buffer->state.pipeline->graphics.vtx_emit_num);
struct radeon_winsys_cs *cs = cmd_buffer->cs;
unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA
: V_0287F0_DI_SRC_SEL_AUTO_INDEX;
- bool draw_id_enable = cmd_buffer->state.pipeline->shaders[MESA_SHADER_VERTEX]->info.info.vs.needs_draw_id;
+ bool draw_id_enable = radv_get_vertex_shader(cmd_buffer->state.pipeline)->info.info.vs.needs_draw_id;
uint32_t base_reg = cmd_buffer->state.pipeline->graphics.vtx_base_sgpr;
assert(base_reg);
if (count_buffer) {
count_va = radv_buffer_get_va(count_buffer->bo);
count_va += count_offset + count_buffer->offset;
+
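+ /* Make sure the count buffer is added to the CS buffer list. */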
+ cmd_buffer->device->ws->cs_add_buffer(cs, count_buffer->bo, 8);
}
if (!draw_count)
uint32_t stride)
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- uint64_t index_va;
- radv_cmd_buffer_flush_state(cmd_buffer, true, false, true, 0);
- index_va = cmd_buffer->state.index_va;
+ radv_cmd_buffer_flush_state(cmd_buffer, true, false, true, 0);
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 31 * MAX_VIEWS);
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
- radeon_emit(cmd_buffer->cs, cmd_buffer->state.index_type);
-
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_BASE, 1, 0));
- radeon_emit(cmd_buffer->cs, index_va);
- radeon_emit(cmd_buffer->cs, index_va >> 32);
-
- radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
- radeon_emit(cmd_buffer->cs, cmd_buffer->state.max_index_count);
-
radv_emit_indirect_draw(cmd_buffer, buffer, offset,
countBuffer, countBufferOffset, maxDrawCount, stride, true);
struct radeon_winsys *ws = cmd_buffer->device->ws;
struct radeon_winsys_cs *cs = cmd_buffer->cs;
struct ac_userdata_info *loc;
+ unsigned dispatch_initiator;
uint8_t grid_used;
grid_used = compute_shader->info.info.cs.grid_components_used;
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(ws, cs, 25);
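+ /* Enable the compute shader and force the thread group start
+ * coordinates to (0,0,0) for every dispatch.
+ */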
+ dispatch_initiator = S_00B800_COMPUTE_SHADER_EN(1) |
+ S_00B800_FORCE_START_AT_000(1);
+
+ if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
+ /* If the KMD allows it (there is a KMD hw register for it),
+ * allow launching waves out-of-order.
+ */
+ dispatch_initiator |= S_00B800_ORDER_MODE(1);
+ }
+
if (info->indirect) {
uint64_t va = radv_buffer_get_va(info->indirect->bo);
PKT3_SHADER_TYPE_S(1));
radeon_emit(cs, va);
radeon_emit(cs, va >> 32);
- radeon_emit(cs, 1);
+ radeon_emit(cs, dispatch_initiator);
} else {
radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) |
PKT3_SHADER_TYPE_S(1));
radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, 0) |
PKT3_SHADER_TYPE_S(1));
radeon_emit(cs, 0);
- radeon_emit(cs, 1);
+ radeon_emit(cs, dispatch_initiator);
}
} else {
unsigned blocks[3] = { info->blocks[0], info->blocks[1], info->blocks[2] };
- unsigned dispatch_initiator = S_00B800_COMPUTE_SHADER_EN(1);
if (info->unaligned) {
unsigned *cs_block_size = compute_shader->info.cs.block_size;
VkImageLayout dst_layout,
unsigned src_queue_mask,
unsigned dst_queue_mask,
- const VkImageSubresourceRange *range,
- VkImageAspectFlags pending_clears)
+ const VkImageSubresourceRange *range)
{
if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
if (image->fmask.size)
VkImageLayout dst_layout,
unsigned src_queue_mask,
unsigned dst_queue_mask,
- const VkImageSubresourceRange *range,
- VkImageAspectFlags pending_clears)
+ const VkImageSubresourceRange *range)
{
if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
radv_initialize_dcc(cmd_buffer, image, 0x20202020u);
dst_queue_mask, range,
pending_clears);
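+ /* Images with FMASK need the CMASK transition path too. */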
- if (image->cmask.size)
+ if (image->cmask.size || image->fmask.size)
radv_handle_cmask_image_transition(cmd_buffer, image, src_layout,
dst_layout, src_queue_mask,
- dst_queue_mask, range,
- pending_clears);
+ dst_queue_mask, range);
if (image->surface.dcc_size)
radv_handle_dcc_image_transition(cmd_buffer, image, src_layout,
dst_layout, src_queue_mask,
- dst_queue_mask, range,
- pending_clears);
+ dst_queue_mask, range);
}
void radv_CmdPipelineBarrier(
cmd_buffer->state.predicating,
cmd_buffer->device->physical_device->rad_info.chip_class,
false,
- EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0,
+ V_028A90_BOTTOM_OF_PIPE_TS, 0,
1, va, 2, value);
assert(cmd_buffer->cs->cdw <= cdw_max);