}
}
-unsigned
+void
tu6_emit_event_write(struct tu_cmd_buffer *cmd,
struct tu_cs *cs,
- enum vgt_event_type event,
- bool need_seqno)
+ enum vgt_event_type event)
{
- unsigned seqno = 0;
+ bool need_seqno = false;
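+ /* The _TS ("timestamp") events write a seqno to memory when they
+ * complete, so their payload needs an address and value even though
+ * nothing waits on the seqno anymore.
+ */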
+ switch (event) {
+ case CACHE_FLUSH_TS:
+ case WT_DONE_TS:
+ case RB_DONE_TS:
+ case PC_CCU_FLUSH_DEPTH_TS:
+ case PC_CCU_FLUSH_COLOR_TS:
+ case PC_CCU_RESOLVE_TS:
+ need_seqno = true;
+ break;
+ default:
+ break;
+ }
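+ /* Payload is 1 dword (event) or 4 (event + 64-bit address + seqno). */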
tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, need_seqno ? 4 : 1);
tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(event));
if (need_seqno) {
tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
- seqno = ++cmd->scratch_seqno;
- tu_cs_emit(cs, seqno);
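+ /* The seqno is never read back, so any value works here. */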
+ tu_cs_emit(cs, 0);
}
-
- return seqno;
}
static void
-tu6_emit_cache_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
+tu6_emit_flushes(struct tu_cmd_buffer *cmd_buffer,
+ struct tu_cs *cs,
+ enum tu_cmd_flush_bits flushes)
{
- tu6_emit_event_write(cmd, cs, 0x31, false);
+ /* Experiments show that invalidating CCU while it still has data in it
+ * doesn't work, so make sure to always flush before invalidating in case
+ * any data remains that hasn't yet been made available through a barrier.
+ * However, invalidating while dirty does seem to work for UCHE.
+ */
+ if (flushes & (TU_CMD_FLAG_CCU_FLUSH_COLOR |
+ TU_CMD_FLAG_CCU_INVALIDATE_COLOR))
+ tu6_emit_event_write(cmd_buffer, cs, PC_CCU_FLUSH_COLOR_TS);
+ if (flushes & (TU_CMD_FLAG_CCU_FLUSH_DEPTH |
+ TU_CMD_FLAG_CCU_INVALIDATE_DEPTH))
+ tu6_emit_event_write(cmd_buffer, cs, PC_CCU_FLUSH_DEPTH_TS);
+ if (flushes & TU_CMD_FLAG_CCU_INVALIDATE_COLOR)
+ tu6_emit_event_write(cmd_buffer, cs, PC_CCU_INVALIDATE_COLOR);
+ if (flushes & TU_CMD_FLAG_CCU_INVALIDATE_DEPTH)
+ tu6_emit_event_write(cmd_buffer, cs, PC_CCU_INVALIDATE_DEPTH);
+ if (flushes & TU_CMD_FLAG_CACHE_FLUSH)
+ tu6_emit_event_write(cmd_buffer, cs, CACHE_FLUSH_TS);
+ if (flushes & TU_CMD_FLAG_CACHE_INVALIDATE)
+ tu6_emit_event_write(cmd_buffer, cs, CACHE_INVALIDATE);
+ if (flushes & TU_CMD_FLAG_WFI)
+ tu_cs_emit_wfi(cs);
}
+/* "Normal" cache flushes, that don't require any special handling */
+
static void
-tu6_emit_lrz_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
+tu_emit_cache_flush(struct tu_cmd_buffer *cmd_buffer,
+ struct tu_cs *cs)
{
- tu6_emit_event_write(cmd, cs, LRZ_FLUSH, false);
+ tu6_emit_flushes(cmd_buffer, cs, cmd_buffer->state.cache.flush_bits);
+ cmd_buffer->state.cache.flush_bits = 0;
}
-static void
-tu6_emit_wfi(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
+/* Renderpass cache flushes */
+
+void
+tu_emit_cache_flush_renderpass(struct tu_cmd_buffer *cmd_buffer,
+ struct tu_cs *cs)
{
- if (cmd->wait_for_idle) {
- tu_cs_emit_wfi(cs);
- cmd->wait_for_idle = false;
+ tu6_emit_flushes(cmd_buffer, cs, cmd_buffer->state.renderpass_cache.flush_bits);
+ cmd_buffer->state.renderpass_cache.flush_bits = 0;
+}
+
+/* Cache flushes for things that use the color/depth read/write path (i.e.
+ * blits and draws). This deals with changing CCU state as well as the usual
+ * cache flushing.
+ */
+
+void
+tu_emit_cache_flush_ccu(struct tu_cmd_buffer *cmd_buffer,
+ struct tu_cs *cs,
+ enum tu_cmd_ccu_state ccu_state)
+{
+ enum tu_cmd_flush_bits flushes = cmd_buffer->state.cache.flush_bits;
+
+ assert(ccu_state != TU_CMD_CCU_UNKNOWN);
+
+ /* Changing CCU state must involve invalidating the CCU. In sysmem mode,
+ * the CCU may also contain data that we haven't flushed out yet, so we
+ * also need to flush. Also, in order to program RB_CCU_CNTL, we need to
+ * emit a WFI as it isn't pipelined.
+ */
+ if (ccu_state != cmd_buffer->state.ccu_state) {
+ if (cmd_buffer->state.ccu_state != TU_CMD_CCU_GMEM) {
+ flushes |=
+ TU_CMD_FLAG_CCU_FLUSH_COLOR |
+ TU_CMD_FLAG_CCU_FLUSH_DEPTH;
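+ /* These are flushed below regardless, so drop them from the pending
+ * set.
+ */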
+ cmd_buffer->state.cache.pending_flush_bits &= ~(
+ TU_CMD_FLAG_CCU_FLUSH_COLOR |
+ TU_CMD_FLAG_CCU_FLUSH_DEPTH);
+ }
+ flushes |=
+ TU_CMD_FLAG_CCU_INVALIDATE_COLOR |
+ TU_CMD_FLAG_CCU_INVALIDATE_DEPTH |
+ TU_CMD_FLAG_WFI;
+ cmd_buffer->state.cache.pending_flush_bits &= ~(
+ TU_CMD_FLAG_CCU_INVALIDATE_COLOR |
+ TU_CMD_FLAG_CCU_INVALIDATE_DEPTH);
+ }
+
+ tu6_emit_flushes(cmd_buffer, cs, flushes);
+ cmd_buffer->state.cache.flush_bits = 0;
+
+ if (ccu_state != cmd_buffer->state.ccu_state) {
+ struct tu_physical_device *phys_dev = cmd_buffer->device->physical_device;
+ tu_cs_emit_regs(cs,
+ A6XX_RB_CCU_CNTL(.offset =
+ ccu_state == TU_CMD_CCU_GMEM ?
+ phys_dev->ccu_offset_gmem :
+ phys_dev->ccu_offset_bypass,
+ .gmem = ccu_state == TU_CMD_CCU_GMEM));
+ cmd_buffer->state.ccu_state = ccu_state;
}
}
tu_cs_emit_regs(cs,
A6XX_SP_SRGB_CNTL(.dword = subpass->srgb_cntl));
- tu_cs_emit_regs(cs,
- A6XX_RB_RENDER_COMPONENTS(.dword = subpass->render_components));
- tu_cs_emit_regs(cs,
- A6XX_SP_FS_RENDER_COMPONENTS(.dword = subpass->render_components));
-
tu_cs_emit_regs(cs, A6XX_GRAS_MAX_LAYER_INDEX(fb->layers - 1));
}
tu_cs_emit_pkt7(cs, CP_SET_BIN_DATA5, 7);
tu_cs_emit(cs, cmd->state.tiling_config.pipe_sizes[tile->pipe] |
CP_SET_BIN_DATA5_0_VSC_N(tile->slot));
- tu_cs_emit_qw(cs, cmd->vsc_data.iova + tile->pipe * cmd->vsc_data_pitch);
- tu_cs_emit_qw(cs, cmd->vsc_data.iova + (tile->pipe * 4) + (32 * cmd->vsc_data_pitch));
- tu_cs_emit_qw(cs, cmd->vsc_data2.iova + (tile->pipe * cmd->vsc_data2_pitch));
+ tu_cs_emit_qw(cs, cmd->vsc_draw_strm.iova + tile->pipe * cmd->vsc_draw_strm_pitch);
+ tu_cs_emit_qw(cs, cmd->vsc_draw_strm.iova + (tile->pipe * 4) + (32 * cmd->vsc_draw_strm_pitch));
+ tu_cs_emit_qw(cs, cmd->vsc_prim_strm.iova + (tile->pipe * cmd->vsc_prim_strm_pitch));
tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
tu_cs_emit(cs, 0x0);
tu_resolve_sysmem(cmd, cs, src, dst, fb->layers, &cmd->state.tiling_config.render_area);
}
+static void
+tu6_emit_sysmem_resolves(struct tu_cmd_buffer *cmd,
+ struct tu_cs *cs,
+ const struct tu_subpass *subpass)
+{
+ if (subpass->resolve_attachments) {
+ /* From the documentation for vkCmdNextSubpass, section 7.4 "Render Pass
+ * Commands":
+ *
+ * End-of-subpass multisample resolves are treated as color
+ * attachment writes for the purposes of synchronization. That is,
+ * they are considered to execute in the
+ * VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT pipeline stage and
+ * their writes are synchronized with
+ * VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT. Synchronization between
+ * rendering within a subpass and any resolve operations at the end
+ * of the subpass occurs automatically, without need for explicit
+ * dependencies or pipeline barriers. However, if the resolve
+ * attachment is also used in a different subpass, an explicit
+ * dependency is needed.
+ *
+ * We use the CP_BLIT path for sysmem resolves, which is really a
+ * transfer command, so we have to flush manually, as in the gmem
+ * resolve case. However, a flush afterwards isn't needed because of the
+ * last sentence and the fact that we're in sysmem mode.
+ */
+ tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
+ tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
+
+ /* Wait for the flushes to land before using the 2D engine */
+ tu_cs_emit_wfi(cs);
+
+ for (unsigned i = 0; i < subpass->color_count; i++) {
+ uint32_t a = subpass->resolve_attachments[i].attachment;
+ if (a == VK_ATTACHMENT_UNUSED)
+ continue;
+
+ tu6_emit_sysmem_resolve(cmd, cs, a,
+ subpass->color_attachments[i].attachment);
+ }
+ }
+}
+
static void
tu6_emit_tile_store(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
{
const struct tu_physical_device *phys_dev = cmd->device->physical_device;
- tu6_emit_cache_flush(cmd, cs);
+ tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UPDATE_CNTL, 0xfffff);
tu_cs_emit_regs(cs,
A6XX_RB_CCU_CNTL(.offset = phys_dev->ccu_offset_bypass));
+ cmd->state.ccu_state = TU_CMD_CCU_SYSMEM;
tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E04, 0x00100000);
tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE04, 0x8);
tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE00, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9101, 0xffff00);
tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9107, 0);
- tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9236, 1);
+ tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9236,
+ A6XX_VPC_UNKNOWN_9236_POINT_COORD_INVERT(0));
tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9300, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_VPC_SO_OVERRIDE,
A6XX_VPC_SO_OVERRIDE_SO_DISABLE);
tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9801, 0);
- tu_cs_emit_write_reg(cs, REG_A6XX_PC_PRIMID_CNTL, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9980, 0);
tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9990, 0);
tu_cs_sanity_check(cs);
}
-static void
-tu6_cache_flush(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
-{
- unsigned seqno;
-
- seqno = tu6_emit_event_write(cmd, cs, RB_DONE_TS, true);
-
- tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
- tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
- CP_WAIT_REG_MEM_0_POLL_MEMORY);
- tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
- tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(seqno));
- tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0));
- tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));
-
- seqno = tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS, true);
-
- tu_cs_emit_pkt7(cs, CP_WAIT_MEM_GTE, 4);
- tu_cs_emit(cs, CP_WAIT_MEM_GTE_0_RESERVED(0));
- tu_cs_emit_qw(cs, cmd->scratch_bo.iova);
- tu_cs_emit(cs, CP_WAIT_MEM_GTE_3_REF(seqno));
-}
-
static void
update_vsc_pipe(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
tu_cs_emit_regs(cs,
A6XX_VSC_BIN_SIZE(.width = tiling->tile0.extent.width,
.height = tiling->tile0.extent.height),
- A6XX_VSC_SIZE_ADDRESS(.bo = &cmd->vsc_data,
- .bo_offset = 32 * cmd->vsc_data_pitch));
+ A6XX_VSC_DRAW_STRM_SIZE_ADDRESS(.bo = &cmd->vsc_draw_strm,
+ .bo_offset = 32 * cmd->vsc_draw_strm_pitch));
tu_cs_emit_regs(cs,
A6XX_VSC_BIN_COUNT(.nx = tiling->tile_count.width,
tu_cs_emit(cs, tiling->pipe_config[i]);
tu_cs_emit_regs(cs,
- A6XX_VSC_PIPE_DATA2_ADDRESS(.bo = &cmd->vsc_data2),
- A6XX_VSC_PIPE_DATA2_PITCH(cmd->vsc_data2_pitch),
- A6XX_VSC_PIPE_DATA2_ARRAY_PITCH(cmd->vsc_data2.size));
+ A6XX_VSC_PRIM_STRM_ADDRESS(.bo = &cmd->vsc_prim_strm),
+ A6XX_VSC_PRIM_STRM_PITCH(cmd->vsc_prim_strm_pitch),
+ A6XX_VSC_PRIM_STRM_ARRAY_PITCH(cmd->vsc_prim_strm.size));
tu_cs_emit_regs(cs,
- A6XX_VSC_PIPE_DATA_ADDRESS(.bo = &cmd->vsc_data),
- A6XX_VSC_PIPE_DATA_PITCH(cmd->vsc_data_pitch),
- A6XX_VSC_PIPE_DATA_ARRAY_PITCH(cmd->vsc_data.size));
+ A6XX_VSC_DRAW_STRM_ADDRESS(.bo = &cmd->vsc_draw_strm),
+ A6XX_VSC_DRAW_STRM_PITCH(cmd->vsc_draw_strm_pitch),
+ A6XX_VSC_DRAW_STRM_ARRAY_PITCH(cmd->vsc_draw_strm.size));
}
static void
tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
CP_COND_WRITE5_0_WRITE_MEMORY);
- tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE_REG(i)));
+ tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_DRAW_STRM_SIZE_REG(i)));
tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
- tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_data_pitch));
+ tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_draw_strm_pitch));
tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
- tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(1 + cmd->vsc_data_pitch));
+ tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(1 + cmd->vsc_draw_strm_pitch));
tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
CP_COND_WRITE5_0_WRITE_MEMORY);
- tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_SIZE2_REG(i)));
+ tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_PRIM_STRM_SIZE_REG(i)));
tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
- tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_data2_pitch));
+ tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_prim_strm_pitch));
tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
tu_cs_emit_qw(cs, cmd->scratch_bo.iova + ctrl_offset(vsc_scratch));
- tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(3 + cmd->vsc_data2_pitch));
+ tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(3 + cmd->vsc_prim_strm_pitch));
}
tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
* if (b0 set)..
*/
- /* b0 will be set if VSC_DATA or VSC_DATA2 overflow: */
+ /* b0 will be set if VSC_DRAW_STRM or VSC_PRIM_STRM overflow: */
tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(OVERFLOW_FLAG_REG) |
A6XX_CP_REG_TEST_0_BIT(0) |
tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
tu_cs_emit(cs, UNK_2D);
- tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
- tu6_cache_flush(cmd, cs);
+ /* This flush is probably required because the VSC, which produces the
+ * visibility stream, is a client of UCHE, whereas the CP needs to read the
+ * visibility stream (without caching) to do draw skipping. The
+ * WFI+WAIT_FOR_ME combination guarantees that the submitted binning
+ * commands have finished before we read the VSC regs (in
+ * emit_vsc_overflow_test) or the VSC_DRAW_STRM buffer directly (implicitly
+ * as part of draws).
+ */
+ tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS);
tu_cs_emit_wfi(cs);
tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
tu_cs_emit(cs, 0x0);
-
- cmd->wait_for_idle = false;
}
static void
tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
const struct VkRect2D *renderArea)
{
- const struct tu_physical_device *phys_dev = cmd->device->physical_device;
const struct tu_framebuffer *fb = cmd->state.framebuffer;
assert(fb->width > 0 && fb->height > 0);
tu6_emit_bin_size(cs, 0, 0, 0xc00000); /* 0xc00000 = BYPASS? */
- tu6_emit_lrz_flush(cmd, cs);
+ tu6_emit_event_write(cmd, cs, LRZ_FLUSH);
tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));
tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
tu_cs_emit(cs, 0x0);
- tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR, false);
- tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH, false);
- tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
-
- tu6_emit_wfi(cmd, cs);
- tu_cs_emit_regs(cs,
- A6XX_RB_CCU_CNTL(.offset = phys_dev->ccu_offset_bypass));
+ tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_SYSMEM);
/* enable stream-out, with sysmem there is only one pass: */
tu_cs_emit_regs(cs,
/* Do any resolves of the last subpass. These are handled in the
* tile_store_ib in the gmem path.
*/
- const struct tu_subpass *subpass = cmd->state.subpass;
- if (subpass->resolve_attachments) {
- for (unsigned i = 0; i < subpass->color_count; i++) {
- uint32_t a = subpass->resolve_attachments[i].attachment;
- if (a != VK_ATTACHMENT_UNUSED)
- tu6_emit_sysmem_resolve(cmd, cs, a,
- subpass->color_attachments[i].attachment);
- }
- }
+ tu6_emit_sysmem_resolves(cmd, cs, cmd->state.subpass);
tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);
tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
tu_cs_emit(cs, 0x0);
- tu6_emit_lrz_flush(cmd, cs);
-
- tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
- tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
+ tu6_emit_event_write(cmd, cs, LRZ_FLUSH);
tu_cs_sanity_check(cs);
}
{
struct tu_physical_device *phys_dev = cmd->device->physical_device;
- tu6_emit_lrz_flush(cmd, cs);
+ tu6_emit_event_write(cmd, cs, LRZ_FLUSH);
/* lrz clear? */
- tu6_emit_cache_flush(cmd, cs);
-
tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
tu_cs_emit(cs, 0x0);
- /* TODO: flushing with barriers instead of blindly always flushing */
- tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
- tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
- tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR, false);
- tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH, false);
-
- tu_cs_emit_wfi(cs);
- tu_cs_emit_regs(cs,
- A6XX_RB_CCU_CNTL(.offset = phys_dev->ccu_offset_gmem, .gmem = 1));
+ tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_GMEM);
const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
if (use_hw_binning(cmd)) {
tu6_emit_tile_select(cmd, cs, tile);
tu_cs_emit_call(cs, &cmd->draw_cs);
- cmd->wait_for_idle = true;
if (use_hw_binning(cmd)) {
tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
tu_cs_emit_regs(cs,
A6XX_GRAS_LRZ_CNTL(0));
- tu6_emit_lrz_flush(cmd, cs);
+ tu6_emit_event_write(cmd, cs, LRZ_FLUSH);
- tu6_emit_event_write(cmd, cs, PC_CCU_RESOLVE_TS, true);
+ tu6_emit_event_write(cmd, cs, PC_CCU_RESOLVE_TS);
tu_cs_sanity_check(cs);
}
tu6_sysmem_render_begin(cmd, &cmd->cs, &tiling->render_area);
tu_cs_emit_call(&cmd->cs, &cmd->draw_cs);
- cmd->wait_for_idle = true;
tu6_sysmem_render_end(cmd, &cmd->cs);
}
goto fail_scratch_bo;
/* TODO: resize on overflow */
- cmd_buffer->vsc_data_pitch = device->vsc_data_pitch;
- cmd_buffer->vsc_data2_pitch = device->vsc_data2_pitch;
- cmd_buffer->vsc_data = device->vsc_data;
- cmd_buffer->vsc_data2 = device->vsc_data2;
+ cmd_buffer->vsc_draw_strm_pitch = device->vsc_draw_strm_pitch;
+ cmd_buffer->vsc_prim_strm_pitch = device->vsc_prim_strm_pitch;
+ cmd_buffer->vsc_draw_strm = device->vsc_draw_strm;
+ cmd_buffer->vsc_prim_strm = device->vsc_prim_strm;
return VK_SUCCESS;
list_del(&cmd_buffer->pool_link);
- for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
+ for (unsigned i = 0; i < MAX_BIND_POINTS; i++)
free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);
tu_cs_finish(&cmd_buffer->cs);
static VkResult
tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
{
- cmd_buffer->wait_for_idle = true;
-
cmd_buffer->record_result = VK_SUCCESS;
tu_bo_list_reset(&cmd_buffer->bo_list);
tu_cs_reset(&cmd_buffer->draw_epilogue_cs);
tu_cs_reset(&cmd_buffer->sub_cs);
- for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
+ for (unsigned i = 0; i < MAX_BIND_POINTS; i++) {
cmd_buffer->descriptors[i].valid = 0;
cmd_buffer->descriptors[i].push_dirty = false;
}
return tu_reset_cmd_buffer(cmd_buffer);
}
+/* Initialize the cache, assuming all necessary flushes have happened but *not*
+ * invalidations.
+ */
+static void
+tu_cache_init(struct tu_cache_state *cache)
+{
+ cache->flush_bits = 0;
+ cache->pending_flush_bits = TU_CMD_FLAG_ALL_INVALIDATE;
+}
+
VkResult
tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
const VkCommandBufferBeginInfo *pBeginInfo)
}
memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
+ tu_cache_init(&cmd_buffer->state.cache);
+ tu_cache_init(&cmd_buffer->state.renderpass_cache);
cmd_buffer->usage_flags = pBeginInfo->flags;
tu_cs_begin(&cmd_buffer->cs);
tu_cs_begin(&cmd_buffer->draw_cs);
tu_cs_begin(&cmd_buffer->draw_epilogue_cs);
- cmd_buffer->scratch_seqno = 0;
-
/* setup initial configuration into command buffer */
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
switch (cmd_buffer->queue_family_index) {
default:
break;
}
- } else if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
- (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
- assert(pBeginInfo->pInheritanceInfo);
- cmd_buffer->state.pass = tu_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
- cmd_buffer->state.subpass = &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
+ } else if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
+ if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
+ assert(pBeginInfo->pInheritanceInfo);
+ cmd_buffer->state.pass = tu_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
+ cmd_buffer->state.subpass =
+ &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
+ } else {
+ /* When executing in the middle of another command buffer, the CCU
+ * state is unknown.
+ */
+ cmd_buffer->state.ccu_state = TU_CMD_CCU_UNKNOWN;
+ }
}
cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;
return VK_SUCCESS;
}
+/* Sets vertex buffers to HW binding points. We emit VBs in SDS (i.e.
+ * CP_SET_DRAW_STATE, so that bin rendering can skip over unused state),
+ * which means we need to collect all the bindings together into a single
+ * state emit at draw time.
+ */
void
tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
uint32_t firstBinding,
assert(firstBinding + bindingCount <= MAX_VBS);
for (uint32_t i = 0; i < bindingCount; i++) {
- cmd->state.vb.buffers[firstBinding + i] =
- tu_buffer_from_handle(pBuffers[i]);
+ struct tu_buffer *buf = tu_buffer_from_handle(pBuffers[i]);
+
+ cmd->state.vb.buffers[firstBinding + i] = buf;
cmd->state.vb.offsets[firstBinding + i] = pOffsets[i];
+
+ tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
}
- /* VB states depend on VkPipelineVertexInputStateCreateInfo */
cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
}
cmd->state.dirty |= TU_CMD_DIRTY_PUSH_CONSTANTS;
}
+/* Flush everything which has been made available but we haven't actually
+ * flushed yet.
+ */
+static void
+tu_flush_all_pending(struct tu_cache_state *cache)
+{
+ cache->flush_bits |= cache->pending_flush_bits & TU_CMD_FLAG_ALL_FLUSH;
+ cache->pending_flush_bits &= ~TU_CMD_FLAG_ALL_FLUSH;
+}
+
VkResult
tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
- if (cmd_buffer->scratch_seqno) {
- tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->scratch_bo,
- MSM_SUBMIT_BO_WRITE);
+ /* We currently flush the CCU at the end of the command buffer, as
+ * the blob does. There's implicit synchronization around every
+ * vkQueueSubmit, but the kernel only flushes the UCHE, and we don't
+ * know yet if this command buffer will be the last in the submit, so we
+ * have to defensively flush everything else.
+ *
+ * TODO: We could definitely do better than this, since these flushes
+ * aren't required by Vulkan, but we'd need kernel support to do that.
+ * Ideally, we'd like the kernel to flush everything afterwards, so that we
+ * wouldn't have to do any flushes here, and when submitting multiple
+ * command buffers there wouldn't be any unnecessary flushes in between.
+ */
+ if (cmd_buffer->state.pass) {
+ tu_flush_all_pending(&cmd_buffer->state.renderpass_cache);
+ tu_emit_cache_flush_renderpass(cmd_buffer, &cmd_buffer->draw_cs);
+ } else {
+ tu_flush_all_pending(&cmd_buffer->state.cache);
+ cmd_buffer->state.cache.flush_bits |=
+ TU_CMD_FLAG_CCU_FLUSH_COLOR |
+ TU_CMD_FLAG_CCU_FLUSH_DEPTH;
+ tu_emit_cache_flush(cmd_buffer, &cmd_buffer->cs);
}
+ tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->scratch_bo,
+ MSM_SUBMIT_BO_WRITE);
+
if (cmd_buffer->use_vsc_data) {
- tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_data,
+ tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_draw_strm,
MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
- tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_data2,
+ tu_bo_list_add(&cmd_buffer->bo_list, &cmd_buffer->vsc_prim_strm,
MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
}
break;
}
+ /* If the new pipeline requires more VBs than we had previously set up, we
+ * need to re-emit them in SDS. If it requires the same set or fewer, we
+ * can just re-use the old SDS.
+ */
+ if (pipeline->vi.bindings_used & ~cmd->vertex_bindings_set)
+ cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
+
tu_bo_list_add(&cmd->bo_list, &pipeline->program.binary_bo,
MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
for (uint32_t i = 0; i < pipeline->cs.bo_count; i++) {
tu6_emit_sample_locations(&cmd->draw_cs, pSampleLocationsInfo);
}
+static void
+tu_flush_for_access(struct tu_cache_state *cache,
+ enum tu_cmd_access_mask src_mask,
+ enum tu_cmd_access_mask dst_mask)
+{
+ enum tu_cmd_flush_bits flush_bits = 0;
+
+ if (src_mask & TU_ACCESS_SYSMEM_WRITE) {
+ cache->pending_flush_bits |= TU_CMD_FLAG_ALL_INVALIDATE;
+ }
+
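+/* A cached write makes the written domain's flush pending and, since every
+ * other cache may now hold stale copies, also makes their invalidates
+ * pending.
+ */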
+#define SRC_FLUSH(domain, flush, invalidate) \
+ if (src_mask & TU_ACCESS_##domain##_WRITE) { \
+ cache->pending_flush_bits |= TU_CMD_FLAG_##flush | \
+ (TU_CMD_FLAG_ALL_INVALIDATE & ~TU_CMD_FLAG_##invalidate); \
+ }
+
+ SRC_FLUSH(UCHE, CACHE_FLUSH, CACHE_INVALIDATE)
+ SRC_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
+ SRC_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)
+
+#undef SRC_FLUSH
+
+#define SRC_INCOHERENT_FLUSH(domain, flush, invalidate) \
+ if (src_mask & TU_ACCESS_##domain##_INCOHERENT_WRITE) { \
+ flush_bits |= TU_CMD_FLAG_##flush; \
+ cache->pending_flush_bits |= \
+ (TU_CMD_FLAG_ALL_INVALIDATE & ~TU_CMD_FLAG_##invalidate); \
+ }
+
+ SRC_INCOHERENT_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
+ SRC_INCOHERENT_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)
+
+#undef SRC_INCOHERENT_FLUSH
+
+ if (dst_mask & (TU_ACCESS_SYSMEM_READ | TU_ACCESS_SYSMEM_WRITE)) {
+ flush_bits |= cache->pending_flush_bits & TU_CMD_FLAG_ALL_FLUSH;
+ }
+
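+/* A cached read/write consumes the pending invalidate for its own domain
+ * plus every pending flush except the domain's own (its data is already
+ * visible to itself).
+ */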
+#define DST_FLUSH(domain, flush, invalidate) \
+ if (dst_mask & (TU_ACCESS_##domain##_READ | \
+ TU_ACCESS_##domain##_WRITE)) { \
+ flush_bits |= cache->pending_flush_bits & \
+ (TU_CMD_FLAG_##invalidate | \
+ (TU_CMD_FLAG_ALL_FLUSH & ~TU_CMD_FLAG_##flush)); \
+ }
+
+ DST_FLUSH(UCHE, CACHE_FLUSH, CACHE_INVALIDATE)
+ DST_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
+ DST_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)
+
+#undef DST_FLUSH
+
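+/* Incoherent accesses must always invalidate their own cache, even when no
+ * invalidate was pending, since it may hold stale pre-barrier data.
+ */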
+#define DST_INCOHERENT_FLUSH(domain, flush, invalidate) \
+ if (dst_mask & (TU_ACCESS_##domain##_READ | \
+ TU_ACCESS_##domain##_WRITE)) { \
+ flush_bits |= TU_CMD_FLAG_##invalidate | \
+ (cache->pending_flush_bits & \
+ (TU_CMD_FLAG_ALL_FLUSH & ~TU_CMD_FLAG_##flush)); \
+ }
+
+ DST_INCOHERENT_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
+ DST_INCOHERENT_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)
+
+#undef DST_INCOHERENT_FLUSH
+
+ if (dst_mask & TU_ACCESS_WFI_READ) {
+ flush_bits |= TU_CMD_FLAG_WFI;
+ }
+
+ cache->flush_bits |= flush_bits;
+ cache->pending_flush_bits &= ~flush_bits;
+}
+
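+/* Worked example (sysmem mode, assuming TU_CMD_FLAG_ALL_INVALIDATE covers
+ * UCHE and both CCU domains): a barrier from
+ * VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT to VK_ACCESS_SHADER_READ_BIT maps to
+ * TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE -> TU_ACCESS_UCHE_READ, so
+ * tu_flush_for_access() emits CCU_FLUSH_COLOR and CACHE_INVALIDATE now and
+ * leaves CCU_INVALIDATE_DEPTH pending for a later depth consumer.
+ */
+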
+static enum tu_cmd_access_mask
+vk2tu_access(VkAccessFlags flags, bool gmem)
+{
+ enum tu_cmd_access_mask mask = 0;
+
+ /* If the GPU writes a buffer that is then read by an indirect draw
+ * command, we theoretically need a WFI + WAIT_FOR_ME combination to
+ * wait for the writes to complete. The WAIT_FOR_ME is performed as part
+ * of the draw by the firmware, so we just need to execute a WFI.
+ */
+ if (flags &
+ (VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
+ VK_ACCESS_MEMORY_READ_BIT)) {
+ mask |= TU_ACCESS_WFI_READ;
+ }
+
+ if (flags &
+ (VK_ACCESS_INDIRECT_COMMAND_READ_BIT | /* Read performed by CP */
+ VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT | /* Read performed by CP, I think */
+ VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT | /* Read performed by CP */
+ VK_ACCESS_HOST_READ_BIT | /* sysmem by definition */
+ VK_ACCESS_MEMORY_READ_BIT)) {
+ mask |= TU_ACCESS_SYSMEM_READ;
+ }
+
+ if (flags &
+ (VK_ACCESS_HOST_WRITE_BIT |
+ VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT | /* Write performed by CP, I think */
+ VK_ACCESS_MEMORY_WRITE_BIT)) {
+ mask |= TU_ACCESS_SYSMEM_WRITE;
+ }
+
+ if (flags &
+ (VK_ACCESS_INDEX_READ_BIT | /* Read performed by PC, I think */
+ VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | /* Read performed by VFD */
+ VK_ACCESS_UNIFORM_READ_BIT | /* Read performed by SP */
+ /* TODO: Is there a no-cache bit for textures so that we can ignore
+ * these?
+ */
+ VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | /* Read performed by TP */
+ VK_ACCESS_SHADER_READ_BIT | /* Read performed by SP/TP */
+ VK_ACCESS_MEMORY_READ_BIT)) {
+ mask |= TU_ACCESS_UCHE_READ;
+ }
+
+ if (flags &
+ (VK_ACCESS_SHADER_WRITE_BIT | /* Write performed by SP */
+ VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT | /* Write performed by VPC */
+ VK_ACCESS_MEMORY_WRITE_BIT)) {
+ mask |= TU_ACCESS_UCHE_WRITE;
+ }
+
+ /* When using GMEM, the CCU is always flushed automatically to GMEM, and
+ * then GMEM is flushed to sysmem. Furthermore, we already had to flush any
+ * previous writes in sysmem mode when transitioning to GMEM. Therefore we
+ * can ignore CCU and pretend that color attachments and transfers use
+ * sysmem directly.
+ */
+
+ if (flags &
+ (VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
+ VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT |
+ VK_ACCESS_MEMORY_READ_BIT)) {
+ if (gmem)
+ mask |= TU_ACCESS_SYSMEM_READ;
+ else
+ mask |= TU_ACCESS_CCU_COLOR_INCOHERENT_READ;
+ }
+
+ if (flags &
+ (VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
+ VK_ACCESS_MEMORY_READ_BIT)) {
+ if (gmem)
+ mask |= TU_ACCESS_SYSMEM_READ;
+ else
+ mask |= TU_ACCESS_CCU_DEPTH_INCOHERENT_READ;
+ }
+
+ if (flags &
+ (VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_MEMORY_WRITE_BIT)) {
+ if (gmem) {
+ mask |= TU_ACCESS_SYSMEM_WRITE;
+ } else {
+ mask |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
+ }
+ }
+
+ if (flags &
+ (VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_MEMORY_WRITE_BIT)) {
+ if (gmem) {
+ mask |= TU_ACCESS_SYSMEM_WRITE;
+ } else {
+ mask |= TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE;
+ }
+ }
+
+ /* When the dst access is a transfer read/write, it seems we sometimes need
+ * to insert a WFI after any flushes, to guarantee that the flushes finish
+ * before the 2D engine starts. However, the opposite (i.e. a WFI after
+ * CP_BLIT and before any subsequent flush) does not seem to be needed, and
+ * the blob doesn't emit such a WFI.
+ */
+
+ if (flags &
+ (VK_ACCESS_TRANSFER_WRITE_BIT |
+ VK_ACCESS_MEMORY_WRITE_BIT)) {
+ if (gmem) {
+ mask |= TU_ACCESS_SYSMEM_WRITE;
+ } else {
+ mask |= TU_ACCESS_CCU_COLOR_WRITE;
+ }
+ mask |= TU_ACCESS_WFI_READ;
+ }
+
+ if (flags &
+ (VK_ACCESS_TRANSFER_READ_BIT | /* Access performed by TP */
+ VK_ACCESS_MEMORY_READ_BIT)) {
+ mask |= TU_ACCESS_UCHE_READ | TU_ACCESS_WFI_READ;
+ }
+
+ return mask;
+}
+
void
tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
uint32_t commandBufferCount,
assert(commandBufferCount > 0);
+ /* Emit any pending flushes. */
+ if (cmd->state.pass) {
+ tu_flush_all_pending(&cmd->state.renderpass_cache);
+ tu_emit_cache_flush_renderpass(cmd, &cmd->draw_cs);
+ } else {
+ tu_flush_all_pending(&cmd->state.cache);
+ tu_emit_cache_flush(cmd, &cmd->cs);
+ }
+
for (uint32_t i = 0; i < commandBufferCount; i++) {
TU_FROM_HANDLE(tu_cmd_buffer, secondary, pCmdBuffers[i]);
}
}
cmd->state.dirty = ~0u; /* TODO: set dirty only what needs to be */
+
+ /* After executing secondary command buffers, arbitrary flushes may have
+ * been executed, so when we encounter a pipeline barrier with a
+ * srcMask, we have to assume that we need to invalidate. Therefore we need
+ * to re-initialize the cache with all pending invalidate bits set.
+ */
+ if (cmd->state.pass) {
+ tu_cache_init(&cmd->state.renderpass_cache);
+ } else {
+ tu_cache_init(&cmd->state.cache);
+ }
}
VkResult
}
}
+static void
+tu_subpass_barrier(struct tu_cmd_buffer *cmd_buffer,
+ const struct tu_subpass_barrier *barrier,
+ bool external)
+{
+ /* Note: we don't know until the end of the subpass whether we'll use
+ * sysmem, so assume sysmem here to be safe.
+ */
+ struct tu_cache_state *cache =
+ external ? &cmd_buffer->state.cache : &cmd_buffer->state.renderpass_cache;
+ enum tu_cmd_access_mask src_flags =
+ vk2tu_access(barrier->src_access_mask, false);
+ enum tu_cmd_access_mask dst_flags =
+ vk2tu_access(barrier->dst_access_mask, false);
+
+ if (barrier->incoherent_ccu_color)
+ src_flags |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
+ if (barrier->incoherent_ccu_depth)
+ src_flags |= TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE;
+
+ tu_flush_for_access(cache, src_flags, dst_flags);
+}
+
void
tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
const VkRenderPassBeginInfo *pRenderPassBegin,
tu_cmd_update_tiling_config(cmd, &pRenderPassBegin->renderArea);
tu_cmd_prepare_tile_store_ib(cmd);
+ /* Note: because this is external, any flushes will happen before draw_cs
+ * gets called. However, deferred flushes may still have to happen later
+ * as part of the subpass.
+ */
+ tu_subpass_barrier(cmd, &pass->subpasses[0].start_barrier, true);
+ cmd->state.renderpass_cache.pending_flush_bits =
+ cmd->state.cache.pending_flush_bits;
+ cmd->state.renderpass_cache.flush_bits = 0;
+
tu_emit_load_clear(cmd, pRenderPassBegin);
tu6_emit_zs(cmd, cmd->state.subpass, &cmd->draw_cs);
tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);
- /* Emit flushes so that input attachments will read the correct value.
- * TODO: use subpass dependencies to flush or not
- */
- tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
- tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
-
- if (subpass->resolve_attachments) {
- tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
-
- for (unsigned i = 0; i < subpass->color_count; i++) {
- uint32_t a = subpass->resolve_attachments[i].attachment;
- if (a == VK_ATTACHMENT_UNUSED)
- continue;
-
- tu6_emit_sysmem_resolve(cmd, cs, a,
- subpass->color_attachments[i].attachment);
- }
-
- tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
- }
+ tu6_emit_sysmem_resolves(cmd, cs, subpass);
tu_cond_exec_end(cs);
- /* subpass->input_count > 0 then texture cache invalidate is likely to be needed */
- if (cmd->state.subpass->input_count)
- tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
+ /* Handle dependencies for the next subpass */
+ tu_subpass_barrier(cmd, &cmd->state.subpass->start_barrier, false);
/* emit mrt/zs/msaa/ubwc state for the subpass that is starting */
tu6_emit_zs(cmd, cmd->state.subpass, cs);
{
TU_DRAW_STATE_PROGRAM,
TU_DRAW_STATE_PROGRAM_BINNING,
+ TU_DRAW_STATE_VB,
TU_DRAW_STATE_VI,
TU_DRAW_STATE_VI_BINNING,
TU_DRAW_STATE_VP,
struct tu_cs_entry ib;
};
-static inline uint32_t
-tu6_stage2opcode(gl_shader_stage type)
-{
- switch (type) {
- case MESA_SHADER_VERTEX:
- case MESA_SHADER_TESS_CTRL:
- case MESA_SHADER_TESS_EVAL:
- case MESA_SHADER_GEOMETRY:
- return CP_LOAD_STATE6_GEOM;
- case MESA_SHADER_FRAGMENT:
- case MESA_SHADER_COMPUTE:
- case MESA_SHADER_KERNEL:
- return CP_LOAD_STATE6_FRAG;
- default:
- unreachable("bad shader type");
- }
-}
-
-static inline enum a6xx_state_block
-tu6_stage2shadersb(gl_shader_stage type)
-{
- switch (type) {
- case MESA_SHADER_VERTEX:
- return SB6_VS_SHADER;
- case MESA_SHADER_GEOMETRY:
- return SB6_GS_SHADER;
- case MESA_SHADER_FRAGMENT:
- return SB6_FS_SHADER;
- case MESA_SHADER_COMPUTE:
- case MESA_SHADER_KERNEL:
- return SB6_CS_SHADER;
- default:
- unreachable("bad shader type");
- return ~0;
- }
-}
-
static void
tu6_emit_user_consts(struct tu_cs *cs, const struct tu_pipeline *pipeline,
struct tu_descriptor_state *descriptors_state,
return VK_SUCCESS;
}
+static struct tu_cs_entry
+tu6_emit_vertex_buffers(struct tu_cmd_buffer *cmd,
+ const struct tu_pipeline *pipeline)
+{
+ struct tu_cs cs;
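+ /* Worst case one pkt4 per binding: header + 64-bit VFD_FETCH_BASE +
+ * VFD_FETCH_SIZE = 4 dwords (assumed register layout).
+ */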
+ tu_cs_begin_sub_stream(&cmd->sub_cs, 4 * MAX_VBS, &cs);
+
+ int binding;
+ for_each_bit(binding, pipeline->vi.bindings_used) {
+ const struct tu_buffer *buf = cmd->state.vb.buffers[binding];
+ const VkDeviceSize offset = buf->bo_offset +
+ cmd->state.vb.offsets[binding];
+
+ tu_cs_emit_regs(&cs,
+ A6XX_VFD_FETCH_BASE(binding, .bo = buf->bo, .bo_offset = offset),
+ A6XX_VFD_FETCH_SIZE(binding, buf->size - offset));
+ }
+
+ cmd->vertex_bindings_set = pipeline->vi.bindings_used;
+
+ return tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
+}
+
static VkResult
tu6_emit_descriptor_sets(struct tu_cmd_buffer *cmd,
const struct tu_pipeline *pipeline,
tu6_emit_scissor(cs, &cmd->state.dynamic.scissor.scissors[0]);
}
- if (cmd->state.dirty &
- (TU_CMD_DIRTY_PIPELINE | TU_CMD_DIRTY_VERTEX_BUFFERS)) {
- for (uint32_t i = 0; i < pipeline->vi.count; i++) {
- const uint32_t binding = pipeline->vi.bindings[i];
- const struct tu_buffer *buf = cmd->state.vb.buffers[binding];
- const VkDeviceSize offset = buf->bo_offset +
- cmd->state.vb.offsets[binding];
- const VkDeviceSize size =
- offset < buf->size ? buf->size - offset : 0;
-
- tu_cs_emit_regs(cs,
- A6XX_VFD_FETCH_BASE(i, .bo = buf->bo, .bo_offset = offset),
- A6XX_VFD_FETCH_SIZE(i, size));
- }
- }
-
if (cmd->state.dirty & TU_CMD_DIRTY_PIPELINE) {
draw_state_groups[draw_state_group_count++] =
(struct tu_draw_state_group) {
};
}
+ if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS) {
+ draw_state_groups[draw_state_group_count++] =
+ (struct tu_draw_state_group) {
+ .id = TU_DRAW_STATE_VB,
+ .enable_mask = ENABLE_ALL,
+ .ib = tu6_emit_vertex_buffers(cmd, pipeline)
+ };
+ }
+
if (cmd->state.dirty & TU_CMD_DIRTY_STREAMOUT_BUFFERS)
tu6_emit_streamout(cmd, cs);
tu_cs_sanity_check(cs);
/* track BOs */
- if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS) {
- for (uint32_t i = 0; i < MAX_VBS; i++) {
- const struct tu_buffer *buf = cmd->state.vb.buffers[i];
- if (buf)
- tu_bo_list_add(&cmd->bo_list, buf->bo, MSM_SUBMIT_BO_READ);
- }
- }
if (cmd->state.dirty & TU_CMD_DIRTY_DESCRIPTOR_SETS) {
unsigned i;
for_each_bit(i, descriptors_state->valid) {
struct tu_cs *cs = &cmd->draw_cs;
VkResult result;
+ tu_emit_cache_flush_renderpass(cmd, cs);
+
result = tu6_bind_draw_states(cmd, cs, draw);
if (result != VK_SUCCESS) {
cmd->record_result = result;
if (cmd->state.streamout_enabled) {
for (unsigned i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
if (cmd->state.streamout_enabled & (1 << i))
- tu6_emit_event_write(cmd, cs, FLUSH_SO_0 + i, false);
+ tu6_emit_event_write(cmd, cs, FLUSH_SO_0 + i);
}
}
- cmd->wait_for_idle = true;
-
tu_cs_sanity_check(cs);
}
&cmd->descriptors[VK_PIPELINE_BIND_POINT_COMPUTE];
VkResult result;
+ /* TODO: We could probably flush less if we add a compute_flush_bits
+ * bitfield.
+ */
+ tu_emit_cache_flush(cmd, cs);
+
if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_PIPELINE)
tu_cs_emit_ib(cs, &pipeline->program.state_ib);
if (ib.size)
tu_cs_emit_ib(cs, &ib);
- if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS)
+ if ((cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS) &&
+ pipeline->load_state.state_ib.size > 0) {
tu_cs_emit_ib(cs, &pipeline->load_state.state_ib);
+ }
cmd->state.dirty &=
~(TU_CMD_DIRTY_COMPUTE_DESCRIPTOR_SETS | TU_CMD_DIRTY_COMPUTE_PIPELINE);
}
tu_cs_emit_wfi(cs);
-
- tu6_emit_cache_flush(cmd, cs);
}
void
tu_cs_discard_entries(&cmd_buffer->draw_epilogue_cs);
tu_cs_begin(&cmd_buffer->draw_epilogue_cs);
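+ /* Fold the renderpass's pending flushes back into the outer cache state
+ * so later barriers on the main CS see them.
+ */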
+ cmd_buffer->state.cache.pending_flush_bits |=
+ cmd_buffer->state.renderpass_cache.pending_flush_bits;
+ tu_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier, true);
+
cmd_buffer->state.pass = NULL;
cmd_buffer->state.subpass = NULL;
cmd_buffer->state.framebuffer = NULL;
const VkImageMemoryBarrier *pImageMemoryBarriers,
const struct tu_barrier_info *info)
{
- /* renderpass case is only for subpass self-dependencies
- * which means syncing the render output with texture cache
- * note: only the CACHE_INVALIDATE is needed in GMEM mode
- * and in sysmem mode we might not need either color/depth flush
+ struct tu_cs *cs = cmd->state.pass ? &cmd->draw_cs : &cmd->cs;
+ VkAccessFlags srcAccessMask = 0;
+ VkAccessFlags dstAccessMask = 0;
+
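+ /* We don't track accesses per resource, so OR all the barriers' masks
+ * together and emit one combined set of flushes.
+ */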
+ for (uint32_t i = 0; i < memoryBarrierCount; i++) {
+ srcAccessMask |= pMemoryBarriers[i].srcAccessMask;
+ dstAccessMask |= pMemoryBarriers[i].dstAccessMask;
+ }
+
+ for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
+ srcAccessMask |= pBufferMemoryBarriers[i].srcAccessMask;
+ dstAccessMask |= pBufferMemoryBarriers[i].dstAccessMask;
+ }
+
+ enum tu_cmd_access_mask src_flags = 0;
+ enum tu_cmd_access_mask dst_flags = 0;
+
+ for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
+ TU_FROM_HANDLE(tu_image, image, pImageMemoryBarriers[i].image);
+ VkImageLayout old_layout = pImageMemoryBarriers[i].oldLayout;
+ /* For non-linear images, PREINITIALIZED is the same as UNDEFINED */
+ if (old_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
+ (image->tiling != VK_IMAGE_TILING_LINEAR &&
+ old_layout == VK_IMAGE_LAYOUT_PREINITIALIZED)) {
+ /* The underlying memory for this image may have been used earlier
+ * within the same queue submission for a different image, which
+ * means that there may be old, stale cache entries which are in the
+ * "wrong" location, which could cause problems later after writing
+ * to the image. We don't want these entries being flushed later and
+ * overwriting the actual image, so we need to flush the CCU.
+ */
+ src_flags |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
+ }
+ srcAccessMask |= pImageMemoryBarriers[i].srcAccessMask;
+ dstAccessMask |= pImageMemoryBarriers[i].dstAccessMask;
+ }
+
+ /* Inside a renderpass, we don't know yet whether we'll be using sysmem,
+ * so we have to use the sysmem flushes.
*/
- if (cmd->state.pass) {
- tu6_emit_event_write(cmd, &cmd->draw_cs, PC_CCU_FLUSH_COLOR_TS, true);
- tu6_emit_event_write(cmd, &cmd->draw_cs, PC_CCU_FLUSH_DEPTH_TS, true);
- tu6_emit_event_write(cmd, &cmd->draw_cs, CACHE_INVALIDATE, false);
- return;
+ bool gmem = cmd->state.ccu_state == TU_CMD_CCU_GMEM &&
+ !cmd->state.pass;
+ src_flags |= vk2tu_access(srcAccessMask, gmem);
+ dst_flags |= vk2tu_access(dstAccessMask, gmem);
+
+ struct tu_cache_state *cache =
+ cmd->state.pass ? &cmd->state.renderpass_cache : &cmd->state.cache;
+ tu_flush_for_access(cache, src_flags, dst_flags);
+
+ for (uint32_t i = 0; i < info->eventCount; i++) {
+ TU_FROM_HANDLE(tu_event, event, info->pEvents[i]);
+
+ tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_READ);
+
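+ /* Poll until write_event() has stored 1 to the event BO. */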
+ tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
+ tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
+ CP_WAIT_REG_MEM_0_POLL_MEMORY);
+ tu_cs_emit_qw(cs, event->bo.iova); /* POLL_ADDR_LO/HI */
+ tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(1));
+ tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0u));
+ tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(20));
}
}
}
static void
-write_event(struct tu_cmd_buffer *cmd, struct tu_event *event, unsigned value)
+write_event(struct tu_cmd_buffer *cmd, struct tu_event *event,
+ VkPipelineStageFlags stageMask, unsigned value)
{
struct tu_cs *cs = &cmd->cs;
- tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_WRITE);
+ /* vkCmdSetEvent/vkCmdResetEvent cannot be called inside a render pass */
+ assert(!cmd->state.pass);
- /* TODO: any flush required before/after ? */
+ tu_emit_cache_flush(cmd, cs);
- tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
- tu_cs_emit_qw(cs, event->bo.iova); /* ADDR_LO/HI */
- tu_cs_emit(cs, value);
+ tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_WRITE);
+
+ /* Flags that only require a top-of-pipe event. DrawIndirect parameters are
+ * read by the CP, so the draw indirect stage counts as top-of-pipe too.
+ */
+ VkPipelineStageFlags top_of_pipe_flags =
+ VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT |
+ VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
+
+ if (!(stageMask & ~top_of_pipe_flags)) {
+ tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
+ tu_cs_emit_qw(cs, event->bo.iova); /* ADDR_LO/HI */
+ tu_cs_emit(cs, value);
+ } else {
+ /* Use a RB_DONE_TS event to wait for everything to complete. */
+ tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 4);
+ tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(RB_DONE_TS));
+ tu_cs_emit_qw(cs, event->bo.iova);
+ tu_cs_emit(cs, value);
+ }
}
void
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_event, event, _event);
- write_event(cmd, event, 1);
+ write_event(cmd, event, stageMask, 1);
}
void
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_event, event, _event);
- write_event(cmd, event, 0);
+ write_event(cmd, event, stageMask, 0);
}
void
const VkImageMemoryBarrier *pImageMemoryBarriers)
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
- struct tu_cs *cs = &cmd->cs;
-
- /* TODO: any flush required before/after? (CP_WAIT_FOR_ME?) */
-
- for (uint32_t i = 0; i < eventCount; i++) {
- TU_FROM_HANDLE(tu_event, event, pEvents[i]);
+ struct tu_barrier_info info;
- tu_bo_list_add(&cmd->bo_list, &event->bo, MSM_SUBMIT_BO_READ);
+ info.eventCount = eventCount;
+ info.pEvents = pEvents;
+ info.srcStageMask = 0;
- tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
- tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
- CP_WAIT_REG_MEM_0_POLL_MEMORY);
- tu_cs_emit_qw(cs, event->bo.iova); /* POLL_ADDR_LO/HI */
- tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(1));
- tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0u));
- tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(20));
- }
+ tu_barrier(cmd, memoryBarrierCount, pMemoryBarriers,
+ bufferMemoryBarrierCount, pBufferMemoryBarriers,
+ imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}
void