- struct radeon_winsys *rws = ((struct si_screen*)screen)->ws;
- struct si_multi_fence *rfence = (struct si_multi_fence *)fence;
- int64_t abs_timeout = os_time_get_absolute_timeout(timeout);
-
- if (!util_queue_fence_is_signalled(&rfence->ready)) {
- if (rfence->tc_token) {
- /* Ensure that si_flush_from_st will be called for
- * this fence, but only if we're in the API thread
- * where the context is current.
- *
- * Note that the batch containing the flush may already
- * be in flight in the driver thread, so the fence
- * may not be ready yet when this call returns.
- */
- threaded_context_flush(ctx, rfence->tc_token,
- timeout == 0);
- }
-
- if (!timeout)
- return false;
-
- if (timeout == PIPE_TIMEOUT_INFINITE) {
- util_queue_fence_wait(&rfence->ready);
- } else {
- if (!util_queue_fence_wait_timeout(&rfence->ready, abs_timeout))
- return false;
- }
-
- if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
- int64_t time = os_time_get_nano();
- timeout = abs_timeout > time ? abs_timeout - time : 0;
- }
- }
-
- if (rfence->sdma) {
- if (!rws->fence_wait(rws, rfence->sdma, timeout))
- return false;
-
- /* Recompute the timeout after waiting. */
- if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
- int64_t time = os_time_get_nano();
- timeout = abs_timeout > time ? abs_timeout - time : 0;
- }
- }
-
- if (!rfence->gfx)
- return true;
-
- if (rfence->fine.buf &&
- si_fine_fence_signaled(rws, &rfence->fine)) {
- rws->fence_reference(&rfence->gfx, NULL);
- r600_resource_reference(&rfence->fine.buf, NULL);
- return true;
- }
-
- /* Flush the gfx IB if it hasn't been flushed yet. */
- if (ctx && rfence->gfx_unflushed.ctx) {
- struct si_context *sctx;
-
- sctx = (struct si_context *)threaded_context_unwrap_unsync(ctx);
- if (rfence->gfx_unflushed.ctx == &sctx->b &&
- rfence->gfx_unflushed.ib_index == sctx->b.num_gfx_cs_flushes) {
- /* Section 4.1.2 (Signaling) of the OpenGL 4.6 (Core profile)
- * spec says:
- *
- * "If the sync object being blocked upon will not be
- * signaled in finite time (for example, by an associated
- * fence command issued previously, but not yet flushed to
- * the graphics pipeline), then ClientWaitSync may hang
- * forever. To help prevent this behavior, if
- * ClientWaitSync is called and all of the following are
- * true:
- *
- * * the SYNC_FLUSH_COMMANDS_BIT bit is set in flags,
- * * sync is unsignaled when ClientWaitSync is called,
- * * and the calls to ClientWaitSync and FenceSync were
- * issued from the same context,
- *
- * then the GL will behave as if the equivalent of Flush
- * were inserted immediately after the creation of sync."
- *
- * This means we need to flush for such fences even when we're
- * not going to wait.
- */
- threaded_context_unwrap_sync(ctx);
- si_flush_gfx_cs(&sctx->b, timeout ? 0 : PIPE_FLUSH_ASYNC, NULL);
- rfence->gfx_unflushed.ctx = NULL;
-
- if (!timeout)
- return false;
-
- /* Recompute the timeout after all that. */
- if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
- int64_t time = os_time_get_nano();
- timeout = abs_timeout > time ? abs_timeout - time : 0;
- }
- }
- }
-
- if (rws->fence_wait(rws, rfence->gfx, timeout))
- return true;
-
- /* Re-check in case the GPU is slow or hangs, but the commands before
- * the fine-grained fence have completed. */
- if (rfence->fine.buf &&
- si_fine_fence_signaled(rws, &rfence->fine))
- return true;
-
- return false;
+ struct radeon_winsys *rws = ((struct si_screen *)screen)->ws;
+ struct si_multi_fence *sfence = (struct si_multi_fence *)fence;
+ struct si_context *sctx;
+ int64_t abs_timeout = os_time_get_absolute_timeout(timeout);
+
+ /* Unwrap the threaded context once up front; the old code unwrapped
+ * lazily inside the gfx_unflushed branch. */
+ ctx = threaded_context_unwrap_sync(ctx);
+ sctx = (struct si_context *)(ctx ? ctx : NULL);
+ /* NOTE(review): the ternary above is redundant — (ctx ? ctx : NULL)
+ * is equivalent to a plain cast of ctx. Harmless, but could be
+ * simplified. */
+
+ if (!util_queue_fence_is_signalled(&sfence->ready)) {
+ if (sfence->tc_token) {
+ /* Ensure that si_flush_from_st will be called for
+ * this fence, but only if we're in the API thread
+ * where the context is current.
+ *
+ * Note that the batch containing the flush may already
+ * be in flight in the driver thread, so the fence
+ * may not be ready yet when this call returns.
+ */
+ threaded_context_flush(ctx, sfence->tc_token, timeout == 0);
+ }
+
+ if (!timeout)
+ return false;
+
+ if (timeout == PIPE_TIMEOUT_INFINITE) {
+ util_queue_fence_wait(&sfence->ready);
+ } else {
+ if (!util_queue_fence_wait_timeout(&sfence->ready, abs_timeout))
+ return false;
+ }
+
+ /* Recompute the remaining timeout from the absolute deadline. */
+ if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
+ int64_t time = os_time_get_nano();
+ timeout = abs_timeout > time ? abs_timeout - time : 0;
+ }
+ }
+
+ if (sfence->sdma) {
+ if (!rws->fence_wait(rws, sfence->sdma, timeout))
+ return false;
+
+ /* Recompute the timeout after waiting. */
+ if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
+ int64_t time = os_time_get_nano();
+ timeout = abs_timeout > time ? abs_timeout - time : 0;
+ }
+ }
+
+ /* No gfx fence means everything we had to wait for has been covered. */
+ if (!sfence->gfx)
+ return true;
+
+ /* Fast path: the fine-grained fence already signaled, so the gfx work
+ * this fence tracks is done — drop both references. */
+ if (sfence->fine.buf && si_fine_fence_signaled(rws, &sfence->fine)) {
+ rws->fence_reference(&sfence->gfx, NULL);
+ si_resource_reference(&sfence->fine.buf, NULL);
+ return true;
+ }
+
+ /* Flush the gfx IB if it hasn't been flushed yet. */
+ /* Only flush when this context created the fence and no gfx flush has
+ * happened since (ib_index still matches num_gfx_cs_flushes). */
+ if (sctx && sfence->gfx_unflushed.ctx == sctx &&
+ sfence->gfx_unflushed.ib_index == sctx->num_gfx_cs_flushes) {
+ /* Section 4.1.2 (Signaling) of the OpenGL 4.6 (Core profile)
+ * spec says:
+ *
+ * "If the sync object being blocked upon will not be
+ * signaled in finite time (for example, by an associated
+ * fence command issued previously, but not yet flushed to
+ * the graphics pipeline), then ClientWaitSync may hang
+ * forever. To help prevent this behavior, if
+ * ClientWaitSync is called and all of the following are
+ * true:
+ *
+ * * the SYNC_FLUSH_COMMANDS_BIT bit is set in flags,
+ * * sync is unsignaled when ClientWaitSync is called,
+ * * and the calls to ClientWaitSync and FenceSync were
+ * issued from the same context,
+ *
+ * then the GL will behave as if the equivalent of Flush
+ * were inserted immediately after the creation of sync."
+ *
+ * This means we need to flush for such fences even when we're
+ * not going to wait.
+ */
+ si_flush_gfx_cs(sctx, (timeout ? 0 : PIPE_FLUSH_ASYNC) | RADEON_FLUSH_START_NEXT_GFX_IB_NOW,
+ NULL);
+ sfence->gfx_unflushed.ctx = NULL;
+
+ if (!timeout)
+ return false;
+
+ /* Recompute the timeout after all that. */
+ if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
+ int64_t time = os_time_get_nano();
+ timeout = abs_timeout > time ? abs_timeout - time : 0;
+ }
+ }
+
+ /* Finally wait on the gfx fence with whatever timeout remains. */
+ if (rws->fence_wait(rws, sfence->gfx, timeout))
+ return true;
+
+ /* Re-check in case the GPU is slow or hangs, but the commands before
+ * the fine-grained fence have completed. */
+ if (sfence->fine.buf && si_fine_fence_signaled(rws, &sfence->fine))
+ return true;
+
+ return false;