- struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
- struct radeon_winsys *ws = ctx->b.ws;
-
- if (ctx->gfx_flush_in_progress)
- return;
-
- if (!radeon_emitted(cs, ctx->b.initial_gfx_cs_size))
- return;
-
- if (si_check_device_reset(&ctx->b))
- return;
-
- if (ctx->screen->debug_flags & DBG(CHECK_VM))
- flags &= ~PIPE_FLUSH_ASYNC;
-
- /* If the state tracker is flushing the GFX IB, r600_flush_from_st is
- * responsible for flushing the DMA IB and merging the fences from both.
- * This code is only needed when the driver flushes the GFX IB
- * internally, and it never asks for a fence handle.
- */
- if (radeon_emitted(ctx->b.dma_cs, 0)) {
- assert(fence == NULL); /* internal flushes only */
- si_flush_dma_cs(ctx, flags, NULL);
- }
-
- ctx->gfx_flush_in_progress = true;
-
- if (!LIST_IS_EMPTY(&ctx->b.active_queries))
- si_suspend_queries(ctx);
-
- ctx->streamout.suspended = false;
- if (ctx->streamout.begin_emitted) {
- si_emit_streamout_end(ctx);
- ctx->streamout.suspended = true;
- }
-
- ctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
- SI_CONTEXT_PS_PARTIAL_FLUSH;
-
- /* DRM 3.1.0 doesn't flush TC for VI correctly. */
- if (ctx->b.chip_class == VI && ctx->b.screen->info.drm_minor <= 1)
- ctx->b.flags |= SI_CONTEXT_INV_GLOBAL_L2 |
- SI_CONTEXT_INV_VMEM_L1;
-
- si_emit_cache_flush(ctx);
-
- if (ctx->current_saved_cs) {
- si_trace_emit(ctx);
- si_log_hw_flush(ctx);
-
- /* Save the IB for debug contexts. */
- si_save_cs(ws, cs, &ctx->current_saved_cs->gfx, true);
- ctx->current_saved_cs->flushed = true;
- ctx->current_saved_cs->time_flush = os_time_get_nano();
- }
-
- /* Flush the CS. */
- ws->cs_flush(cs, flags, &ctx->b.last_gfx_fence);
- if (fence)
- ws->fence_reference(fence, ctx->b.last_gfx_fence);
-
- /* This must be after cs_flush returns, since the context's API
- * thread can concurrently read this value in si_fence_finish. */
- ctx->b.num_gfx_cs_flushes++;
-
- /* Check VM faults if needed. */
- if (ctx->screen->debug_flags & DBG(CHECK_VM)) {
- /* Use conservative timeout 800ms, after which we won't wait any
- * longer and assume the GPU is hung.
- */
- ctx->b.ws->fence_wait(ctx->b.ws, ctx->b.last_gfx_fence, 800*1000*1000);
-
- si_check_vm_faults(ctx, &ctx->current_saved_cs->gfx, RING_GFX);
- }
-
- if (ctx->current_saved_cs)
- si_saved_cs_reference(&ctx->current_saved_cs, NULL);
-
- si_begin_new_gfx_cs(ctx);
- ctx->gfx_flush_in_progress = false;
+ for (unsigned i = 0; i < sctx->num_sdma_uploads; i++) {
+ si_resource_reference(&sctx->sdma_uploads[i].dst, NULL);
+ si_resource_reference(&sctx->sdma_uploads[i].src, NULL);
+ }
+ sctx->num_sdma_uploads = 0;
+}
+
+void si_flush_gfx_cs(struct si_context *ctx, unsigned flags, struct pipe_fence_handle **fence)
+{
+ struct radeon_cmdbuf *cs = ctx->gfx_cs;
+ struct radeon_winsys *ws = ctx->ws;
+ const unsigned wait_ps_cs = SI_CONTEXT_PS_PARTIAL_FLUSH | SI_CONTEXT_CS_PARTIAL_FLUSH;
+ unsigned wait_flags = 0;
+
+ if (ctx->gfx_flush_in_progress)
+ return;
+
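+ /* Decide what must be idle when this IB ends. If the kernel can't flush
+ * TC L2 after the IB, idle the shaders and invalidate L2 here; otherwise
+ * idling the shaders is enough, and even that is skipped on GFX7+ when
+ * the next IB is started immediately.
+ */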
+ if (!ctx->screen->info.kernel_flushes_tc_l2_after_ib) {
+ wait_flags |= wait_ps_cs | SI_CONTEXT_INV_L2;
+ } else if (ctx->chip_class == GFX6) {
+ /* The kernel flushes L2 before shaders are finished. */
+ wait_flags |= wait_ps_cs;
+ } else if (!(flags & RADEON_FLUSH_START_NEXT_GFX_IB_NOW)) {
+ wait_flags |= wait_ps_cs;
+ }
+
+ /* Drop this flush if it's a no-op. */
+ if (!radeon_emitted(cs, ctx->initial_gfx_cs_size) && (!wait_flags || !ctx->gfx_last_ib_is_busy))
+ return;
+
+ if (si_check_device_reset(ctx))
+ return;
+
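+ /* VM fault checking waits on the flush fence at the end of this
+ * function, so the flush must be synchronous. */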
+ if (ctx->screen->debug_flags & DBG(CHECK_VM))
+ flags &= ~PIPE_FLUSH_ASYNC;
+
+ ctx->gfx_flush_in_progress = true;
+
+ /* If the gallium frontend is flushing the GFX IB, si_flush_from_st is
+ * responsible for flushing the DMA IB and merging the fences from both.
+ * If the driver flushes the GFX IB internally, it should never ask
+ * for a fence handle.
+ */
+ assert(!radeon_emitted(ctx->sdma_cs, 0) || fence == NULL);
+
+ /* Update the sdma_uploads list by flushing the uploader. */
+ u_upload_unmap(ctx->b.const_uploader);
+
+ /* Execute SDMA uploads. */
+ ctx->sdma_uploads_in_progress = true;
+ for (unsigned i = 0; i < ctx->num_sdma_uploads; i++) {
+ struct si_sdma_upload *up = &ctx->sdma_uploads[i];
+
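+ /* The const uploader is expected to hand us dword-aligned copies only;
+ * assert that before submitting them to SDMA. */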
+ assert(up->src_offset % 4 == 0 && up->dst_offset % 4 == 0 && up->size % 4 == 0);
+
+ si_sdma_copy_buffer(ctx, &up->dst->b.b, &up->src->b.b, up->dst_offset, up->src_offset,
+ up->size);
+ }
+ ctx->sdma_uploads_in_progress = false;
+ si_unref_sdma_uploads(ctx);
+
+ /* Flush SDMA (preamble IB). */
+ if (radeon_emitted(ctx->sdma_cs, 0))
+ si_flush_dma_cs(ctx, flags, NULL);
+
+ if (radeon_emitted(ctx->prim_discard_compute_cs, 0)) {
+ struct radeon_cmdbuf *compute_cs = ctx->prim_discard_compute_cs;
+ si_compute_signal_gfx(ctx);
+
+ /* Make sure compute shaders are idle before leaving the IB, so that
+ * the next IB doesn't overwrite GDS that might be in use. */
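+ /* EVENT_WRITE + CS_PARTIAL_FLUSH drains compute work on this queue;
+ * EVENT_INDEX(4) is the encoding used for partial-flush events. */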
+ radeon_emit(compute_cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(compute_cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+
+ /* Save the GDS prim restart counter if needed. */
+ if (ctx->preserve_prim_restart_gds_at_flush) {
+ si_cp_copy_data(ctx, compute_cs, COPY_DATA_DST_MEM, ctx->wait_mem_scratch, 4,
+ COPY_DATA_GDS, NULL, 4);
+ }
+ }
+
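+ /* Queries and streamout exist only on graphics-capable contexts;
+ * compute-only contexts skip this. */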
+ if (ctx->has_graphics) {
+ if (!list_is_empty(&ctx->active_queries))
+ si_suspend_queries(ctx);
+
+ ctx->streamout.suspended = false;
+ if (ctx->streamout.begin_emitted) {
+ si_emit_streamout_end(ctx);
+ ctx->streamout.suspended = true;
+
+ /* Since NGG streamout uses GDS, we need to make GDS
+ * idle when we leave the IB, otherwise another process
+ * might overwrite it while our shaders are busy.
+ */
+ if (ctx->screen->use_ngg_streamout)
+ wait_flags |= SI_CONTEXT_PS_PARTIAL_FLUSH;
+ }
+ }
+
+ /* Make sure CP DMA is idle at the end of IBs after L2 prefetches
+ * because the kernel doesn't wait for it. */
+ if (ctx->chip_class >= GFX7)
+ si_cp_dma_wait_for_idle(ctx);
+
+ /* Wait for draw calls to finish if needed. */
+ if (wait_flags) {
+ ctx->flags |= wait_flags;
+ ctx->emit_cache_flush(ctx);
+ }
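+ /* The IB being flushed stays busy (checked by the no-op test on the
+ * next flush) unless both PS and CS were idled above. */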
+ ctx->gfx_last_ib_is_busy = (wait_flags & wait_ps_cs) != wait_ps_cs;
+
+ if (ctx->current_saved_cs) {
+ si_trace_emit(ctx);
+
+ /* Save the IB for debug contexts. */
+ si_save_cs(ws, cs, &ctx->current_saved_cs->gfx, true);
+ ctx->current_saved_cs->flushed = true;
+ ctx->current_saved_cs->time_flush = os_time_get_nano();
+
+ si_log_hw_flush(ctx);
+ }
+
+ if (si_compute_prim_discard_enabled(ctx)) {
+ /* The compute IB can start after the previous gfx IB starts. */
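+ /* RADEON_DEPENDENCY_START_FENCE ties the dependency to the start of the
+ * previous gfx IB rather than its completion, and
+ * RADEON_DEPENDENCY_PARALLEL_COMPUTE_ONLY limits it to the parallel
+ * compute IB. */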
+ if (radeon_emitted(ctx->prim_discard_compute_cs, 0) && ctx->last_gfx_fence) {
+ ctx->ws->cs_add_fence_dependency(
+ ctx->gfx_cs, ctx->last_gfx_fence,
+ RADEON_DEPENDENCY_PARALLEL_COMPUTE_ONLY | RADEON_DEPENDENCY_START_FENCE);
+ }
+
+ /* Remember the last execution barrier. It's in the IB.
+ * It will signal the start of the next compute IB.
+ */
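+ /* Rewriting the header in place arms the previously emitted packet as a
+ * live WRITE_DATA; its write (into barrier_buf, which is saved below as
+ * the last IB barrier) executes when this IB runs. */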
+ if (flags & RADEON_FLUSH_START_NEXT_GFX_IB_NOW && ctx->last_pkt3_write_data) {
+ *ctx->last_pkt3_write_data = PKT3(PKT3_WRITE_DATA, 3, 0);
+ ctx->last_pkt3_write_data = NULL;
+
+ si_resource_reference(&ctx->last_ib_barrier_buf, ctx->barrier_buf);
+ ctx->last_ib_barrier_buf_offset = ctx->barrier_buf_offset;
+ si_resource_reference(&ctx->barrier_buf, NULL);
+
+ ws->fence_reference(&ctx->last_ib_barrier_fence, NULL);
+ }
+ }
+
+ /* Flush the CS. */
+ ws->cs_flush(cs, flags, &ctx->last_gfx_fence);
+ if (fence)
+ ws->fence_reference(fence, ctx->last_gfx_fence);
+
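+ /* This must be after cs_flush returns, since the context's API
+ * thread can concurrently read this value in si_fence_finish. */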
+ ctx->num_gfx_cs_flushes++;
+
+ if (si_compute_prim_discard_enabled(ctx)) {
+ /* Remember the last execution barrier, which is the last fence
+ * in this case.
+ */
+ if (!(flags & RADEON_FLUSH_START_NEXT_GFX_IB_NOW)) {
+ ctx->last_pkt3_write_data = NULL;
+ si_resource_reference(&ctx->last_ib_barrier_buf, NULL);
+ ws->fence_reference(&ctx->last_ib_barrier_fence, ctx->last_gfx_fence);
+ }
+ }
+
+ /* Check VM faults if needed. */
+ if (ctx->screen->debug_flags & DBG(CHECK_VM)) {
+ /* Use a conservative timeout of 800 ms, after which we won't wait any
+ * longer and assume the GPU is hung.
+ */
+ ctx->ws->fence_wait(ctx->ws, ctx->last_gfx_fence, 800 * 1000 * 1000);
+
+ si_check_vm_faults(ctx, &ctx->current_saved_cs->gfx, RING_GFX);
+ }
+
+ if (ctx->current_saved_cs)
+ si_saved_cs_reference(&ctx->current_saved_cs, NULL);
+
+ si_begin_new_gfx_cs(ctx);
+ ctx->gfx_flush_in_progress = false;