fence->fence.ip_type = ip_type;
fence->fence.ip_instance = ip_instance;
fence->fence.ring = ring;
- fence->submission_in_progress = true;
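+ /* util_queue_fence_init leaves the fence signalled; reset it so the fence
+ * counts as unsubmitted until the IB submission assigns a sequence number. */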
+ util_queue_fence_init(&fence->submitted);
+ util_queue_fence_reset(&fence->submitted);
p_atomic_inc(&ctx->refcount);
return (struct pipe_fence_handle *)fence;
}
FREE(fence);
return NULL;
}
+
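+ /* An imported fence has no IB submission pending, so leave it in its
+ * initial (signalled) state. */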
+ util_queue_fence_init(&fence->submitted);
+
return (struct pipe_fence_handle*)fence;
}
return r ? -1 : fd;
}
- os_wait_until_zero(&fence->submission_in_progress, PIPE_TIMEOUT_INFINITE);
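+ /* The IB may still be in flight in the submit thread; wait until the fence
+ * has a sequence number before exporting it. */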
+ util_queue_fence_wait(&fence->submitted);
/* Convert the amdgpu fence into a fence FD. */
int fd;
rfence->fence.fence = seq_no;
rfence->user_fence_cpu_address = user_fence_cpu_address;
- rfence->submission_in_progress = false;
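+ /* The sequence number and user fence address are now valid; unblock any
+ * waiters. */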
+ util_queue_fence_signal(&rfence->submitted);
}
static void amdgpu_fence_signalled(struct pipe_fence_handle *fence)
struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
rfence->signalled = true;
- rfence->submission_in_progress = false;
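+ /* There is no submission to wait for; mark the fence as submitted too so
+ * that waiters don't block. */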
+ util_queue_fence_signal(&rfence->submitted);
}
bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
/* The fence might not have a number assigned if its IB is being
* submitted in the other thread right now. Wait until the submission
* is done. */
- if (!os_wait_until_zero_abs_timeout(&rfence->submission_in_progress,
- abs_timeout))
+ if (!util_queue_fence_wait_timeout(&rfence->submitted, abs_timeout))
return false;
user_fence_cpu = rfence->user_fence_cpu_address;
struct amdgpu_cs_context *cs = acs->csc;
struct amdgpu_fence *fence = (struct amdgpu_fence*)pfence;
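+ /* The fence may be a deferred fence (cs_get_next_fence) whose IB is still
+ * being submitted in another thread; wait until the submission is done so
+ * the dependency data below is valid. */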
+ util_queue_fence_wait(&fence->submitted);
+
if (is_noop_fence_dependency(acs, fence))
return;
continue;
}
- assert(!fence->submission_in_progress);
+ assert(util_queue_fence_is_signalled(&fence->submitted));
amdgpu_cs_chunk_fence_to_dep(&fence->fence, &dep_chunk[num++]);
}
if (!amdgpu_fence_is_syncobj(fence))
continue;
- assert(!fence->submission_in_progress);
+ assert(util_queue_fence_is_signalled(&fence->submitted));
sem_chunk[num++].handle = fence->syncobj;
}
struct amdgpu_cs_fence fence;
uint64_t *user_fence_cpu_address;
- /* If the fence is unknown due to an IB still being submitted
- * in the other thread. */
- volatile int submission_in_progress; /* bool (int for atomicity) */
+ /* Signalled once the fence has been submitted. This is unsignalled for
+ * deferred fences (cs->next_fence) and while an IB is still being submitted
+ * in the submit thread. */
+ struct util_queue_fence submitted;
+
volatile int signalled; /* bool (int for atomicity) */
};
else
amdgpu_ctx_unref(fence->ctx);
+ util_queue_fence_destroy(&fence->submitted);
FREE(fence);
}
*rdst = rsrc;
return fence;
}
+static void
+radeon_drm_cs_add_fence_dependency(struct radeon_winsys_cs *cs,
+ struct pipe_fence_handle *fence)
+{
+ /* TODO: Handle the following unlikely multi-threaded scenario:
+ *
+ * Thread 1 / Context 1 Thread 2 / Context 2
+ * -------------------- --------------------
+ * f = cs_get_next_fence()
+ * cs_add_fence_dependency(f)
+ * cs_flush()
+ * cs_flush()
+ *
+ * We currently assume that this does not happen because we don't support
+ * asynchronous flushes on Radeon.
+ */
+}
+
void radeon_drm_cs_init_functions(struct radeon_drm_winsys *ws)
{
ws->base.ctx_create = radeon_drm_ctx_create;
ws->base.cs_get_next_fence = radeon_drm_cs_get_next_fence;
ws->base.cs_is_buffer_referenced = radeon_bo_is_referenced;
ws->base.cs_sync_flush = radeon_drm_cs_sync_flush;
+ ws->base.cs_add_fence_dependency = radeon_drm_cs_add_fence_dependency;
ws->base.fence_wait = radeon_fence_wait;
ws->base.fence_reference = radeon_fence_reference;
}