+ SRC_INCOHERENT_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
+ SRC_INCOHERENT_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)
+
+#undef SRC_INCOHERENT_FLUSH
+
+ /* Treat host & sysmem write accesses the same, since the kernel implicitly
+ * drains the queue before signalling completion to the host.
+ */
+ if (dst_mask & (TU_ACCESS_SYSMEM_READ | TU_ACCESS_SYSMEM_WRITE |
+ TU_ACCESS_HOST_READ | TU_ACCESS_HOST_WRITE)) {
+ flush_bits |= cache->pending_flush_bits & TU_CMD_FLAG_ALL_FLUSH;
+ }
+
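+ /* For a destination access that goes through one of the caches, we need to
+  * carry out any pending invalidate of that cache, plus any pending flushes
+  * of the other caches so that their data becomes visible. A pending flush
+  * of the destination's own cache can be skipped, since the access goes
+  * through that cache anyway.
+  */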
+#define DST_FLUSH(domain, flush, invalidate) \
+ if (dst_mask & (TU_ACCESS_##domain##_READ | \
+ TU_ACCESS_##domain##_WRITE)) { \
+ flush_bits |= cache->pending_flush_bits & \
+ (TU_CMD_FLAG_##invalidate | \
+ (TU_CMD_FLAG_ALL_FLUSH & ~TU_CMD_FLAG_##flush)); \
+ }
+
+ DST_FLUSH(UCHE, CACHE_FLUSH, CACHE_INVALIDATE)
+ DST_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
+ DST_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)
+
+#undef DST_FLUSH
+
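+ /* Like DST_FLUSH, except that the invalidate of the destination's cache is
+  * emitted unconditionally rather than only when one is pending, since an
+  * incoherent access cannot assume that the current cache contents are
+  * valid.
+  */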
+#define DST_INCOHERENT_FLUSH(domain, flush, invalidate) \
+ if (dst_mask & (TU_ACCESS_##domain##_READ | \
+ TU_ACCESS_##domain##_WRITE)) { \
+ flush_bits |= TU_CMD_FLAG_##invalidate | \
+ (cache->pending_flush_bits & \
+ (TU_CMD_FLAG_ALL_FLUSH & ~TU_CMD_FLAG_##flush)); \
+ }
+
+ DST_INCOHERENT_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
+ DST_INCOHERENT_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)
+
+#undef DST_INCOHERENT_FLUSH
+
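+ /* Reads that need a WFI or WAIT_FOR_ME also pick up any pending
+  * WAIT_FOR_IDLE/WAIT_FOR_ME, in addition to the pending cache flushes.
+  */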
+ if (dst_mask & TU_ACCESS_WFI_READ) {
+ flush_bits |= cache->pending_flush_bits &
+ (TU_CMD_FLAG_ALL_FLUSH | TU_CMD_FLAG_WAIT_FOR_IDLE);
+ }
+
+ if (dst_mask & TU_ACCESS_WFM_READ) {
+ flush_bits |= cache->pending_flush_bits &
+ (TU_CMD_FLAG_ALL_FLUSH | TU_CMD_FLAG_WAIT_FOR_ME);
+ }
+
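+ /* Queue what we now need to emit, and clear it from the pending set. */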
+ cache->flush_bits |= flush_bits;
+ cache->pending_flush_bits &= ~flush_bits;
+}
+
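+/* Translate a Vulkan access mask into the driver-internal access mask. The
+ * gmem flag indicates whether we are rendering to GMEM, in which case
+ * attachment and transfer accesses are treated as sysmem accesses (see the
+ * CCU comment below).
+ */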
+static enum tu_cmd_access_mask
+vk2tu_access(VkAccessFlags flags, bool gmem)
+{
+ enum tu_cmd_access_mask mask = 0;
+
+ /* If the GPU writes a buffer that is then read by an indirect draw
+ * command, we theoretically need to emit a WFI to wait for any cache
+ * flushes, and then a WAIT_FOR_ME to wait on the CP for the WFI to
+ * complete. Waiting for the WFI to complete is performed as part of the
+ * draw by the firmware, so we just need to execute the WFI.
+ *
+ * Transform feedback counters are read via CP_MEM_TO_REG, which implicitly
+ * does CP_WAIT_FOR_ME, but we still need a WFI if the GPU writes it.
+ */
+ if (flags &
+ (VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
+ VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT |
+ VK_ACCESS_MEMORY_READ_BIT)) {
+ mask |= TU_ACCESS_WFI_READ;
+ }
+
+ if (flags &
+ (VK_ACCESS_INDIRECT_COMMAND_READ_BIT | /* Read performed by CP */
+ VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT | /* Read performed by CP */
+ VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT | /* Read performed by CP */
+ VK_ACCESS_MEMORY_READ_BIT)) {
+ mask |= TU_ACCESS_SYSMEM_READ;
+ }
+
+ if (flags &
+ (VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT |
+ VK_ACCESS_MEMORY_WRITE_BIT)) {
+ mask |= TU_ACCESS_CP_WRITE;
+ }
+
+ if (flags &
+ (VK_ACCESS_HOST_READ_BIT |
+ VK_ACCESS_MEMORY_WRITE_BIT)) {
+ mask |= TU_ACCESS_HOST_READ;
+ }
+
+ if (flags &
+ (VK_ACCESS_HOST_WRITE_BIT |
+ VK_ACCESS_MEMORY_WRITE_BIT)) {
+ mask |= TU_ACCESS_HOST_WRITE;
+ }
+
+ if (flags &
+ (VK_ACCESS_INDEX_READ_BIT | /* Read performed by PC, I think */
+ VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | /* Read performed by VFD */
+ VK_ACCESS_UNIFORM_READ_BIT | /* Read performed by SP */
+ /* TODO: Is there a no-cache bit for textures so that we can ignore
+ * these?
+ */
+ VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | /* Read performed by TP */
+ VK_ACCESS_SHADER_READ_BIT | /* Read performed by SP/TP */
+ VK_ACCESS_MEMORY_READ_BIT)) {
+ mask |= TU_ACCESS_UCHE_READ;
+ }
+
+ if (flags &
+ (VK_ACCESS_SHADER_WRITE_BIT | /* Write performed by SP */
+ VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT | /* Write performed by VPC */
+ VK_ACCESS_MEMORY_WRITE_BIT)) {
+ mask |= TU_ACCESS_UCHE_WRITE;
+ }
+
+ /* When using GMEM, the CCU is always flushed automatically to GMEM, and
+ * then GMEM is flushed to sysmem. Furthermore, we already had to flush any
+ * previous writes in sysmem mode when transitioning to GMEM. Therefore we
+ * can ignore CCU and pretend that color attachments and transfers use
+ * sysmem directly.
+ */
+
+ if (flags &
+ (VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
+ VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT |
+ VK_ACCESS_MEMORY_READ_BIT)) {
+ if (gmem)
+ mask |= TU_ACCESS_SYSMEM_READ;
+ else
+ mask |= TU_ACCESS_CCU_COLOR_INCOHERENT_READ;
+ }
+
+ if (flags &
+ (VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
+ VK_ACCESS_MEMORY_READ_BIT)) {
+ if (gmem)
+ mask |= TU_ACCESS_SYSMEM_READ;
+ else
+ mask |= TU_ACCESS_CCU_DEPTH_INCOHERENT_READ;
+ }
+
+ if (flags &
+ (VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_MEMORY_WRITE_BIT)) {
+ if (gmem) {
+ mask |= TU_ACCESS_SYSMEM_WRITE;
+ } else {
+ mask |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
+ }
+ }
+
+ if (flags &
+ (VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_MEMORY_WRITE_BIT)) {
+ if (gmem) {
+ mask |= TU_ACCESS_SYSMEM_WRITE;
+ } else {
+ mask |= TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE;
+ }
+ }
+
+ /* When the dst access is a transfer read/write, it seems we sometimes need
+ * to insert a WFI after any flushes, to guarantee that the flushes finish
+ * before the 2D engine starts. However the opposite (i.e. a WFI after
+ * CP_BLIT and before any subsequent flush) does not seem to be needed, and
+ * the blob doesn't emit such a WFI.
+ */
+
+ if (flags &
+ (VK_ACCESS_TRANSFER_WRITE_BIT |
+ VK_ACCESS_MEMORY_WRITE_BIT)) {
+ if (gmem) {
+ mask |= TU_ACCESS_SYSMEM_WRITE;
+ } else {
+ mask |= TU_ACCESS_CCU_COLOR_WRITE;
+ }
+ mask |= TU_ACCESS_WFI_READ;
+ }
+
+ if (flags &
+ (VK_ACCESS_TRANSFER_READ_BIT | /* Access performed by TP */
+ VK_ACCESS_MEMORY_READ_BIT)) {
+ mask |= TU_ACCESS_UCHE_READ | TU_ACCESS_WFI_READ;
+ }
+
+ return mask;
+}
+
+
+void
+tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
+ uint32_t commandBufferCount,
+ const VkCommandBuffer *pCmdBuffers)
+{
+ TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
+ VkResult result;
+
+ assert(commandBufferCount > 0);
+
+ /* Emit any pending flushes. */
+ if (cmd->state.pass) {
+ tu_flush_all_pending(&cmd->state.renderpass_cache);
+ tu_emit_cache_flush_renderpass(cmd, &cmd->draw_cs);
+ } else {
+ tu_flush_all_pending(&cmd->state.cache);
+ tu_emit_cache_flush(cmd, &cmd->cs);
+ }
+
+ for (uint32_t i = 0; i < commandBufferCount; i++) {
+ TU_FROM_HANDLE(tu_cmd_buffer, secondary, pCmdBuffers[i]);
+
+ result = tu_bo_list_merge(&cmd->bo_list, &secondary->bo_list);
+ if (result != VK_SUCCESS) {
+ cmd->record_result = result;
+ break;
+ }
+
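+ /* Secondaries recorded with RENDER_PASS_CONTINUE only contain draw-time
+  * commands, so their draw_cs/draw_epilogue_cs entries are appended to the
+  * primary's; otherwise their main CS is chained into the primary's CS.
+  */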
+ if (secondary->usage_flags &
+ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
+ assert(tu_cs_is_empty(&secondary->cs));
+
+ result = tu_cs_add_entries(&cmd->draw_cs, &secondary->draw_cs);
+ if (result != VK_SUCCESS) {
+ cmd->record_result = result;
+ break;
+ }
+
+ result = tu_cs_add_entries(&cmd->draw_epilogue_cs,
+ &secondary->draw_epilogue_cs);
+ if (result != VK_SUCCESS) {
+ cmd->record_result = result;
+ break;
+ }
+
+ if (secondary->has_tess)
+ cmd->has_tess = true;
+ } else {
+ assert(tu_cs_is_empty(&secondary->draw_cs));
+ assert(tu_cs_is_empty(&secondary->draw_epilogue_cs));
+
+ for (uint32_t j = 0; j < secondary->cs.bo_count; j++) {
+ tu_bo_list_add(&cmd->bo_list, secondary->cs.bos[j],
+ MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
+ }
+
+ tu_cs_add_entries(&cmd->cs, &secondary->cs);
+ }
+
+ cmd->state.index_size = secondary->state.index_size; /* for restart index update */
+ }
+ cmd->state.dirty = ~0u; /* TODO: set dirty only what needs to be */
+
+ /* After executing secondary command buffers, there may have been arbitrary
+ * flushes executed, so when we encounter a pipeline barrier with a
+ * srcMask, we have to assume that we need to invalidate. Therefore we need
+ * to re-initialize the cache with all pending invalidate bits set.
+ */
+ if (cmd->state.pass) {
+ tu_cache_init(&cmd->state.renderpass_cache);
+ } else {
+ tu_cache_init(&cmd->state.cache);
+ }
+}
+
+VkResult
+tu_CreateCommandPool(VkDevice _device,
+ const VkCommandPoolCreateInfo *pCreateInfo,