Reviewed-by: Marek Olšák <marek.olsak@amd.com>
 {
    struct util_queue_job *ptr;
-   assert(fence->signalled);
-
    mtx_lock(&queue->lock);
    if (queue->kill_threads) {
       mtx_unlock(&queue->lock);
       return;
    }
-   fence->signalled = false;
+   util_queue_fence_reset(fence);
    assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);
 void util_queue_fence_wait(struct util_queue_fence *fence);
 void util_queue_fence_signal(struct util_queue_fence *fence);
+/**
+ * Move \p fence back into the unsignalled state.
+ *
+ * \warning The caller must ensure that no other thread may currently be
+ *          waiting (or about to wait) on the fence.
+ */
+static inline void
+util_queue_fence_reset(struct util_queue_fence *fence)
+{
+   assert(fence->signalled);
+   fence->signalled = false;
+}
+
 static inline bool
 util_queue_fence_is_signalled(struct util_queue_fence *fence)
 {
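
The \warning is aimed at callers that drive a fence directly rather than
through util_queue_add_job() (which, as the first hunk shows, now resets the
fence itself). A minimal sketch of that direct pattern, assuming a
hypothetical launch_work() helper whose worker thread eventually calls
util_queue_fence_signal():

#include "util/u_queue.h"

/* launch_work() is hypothetical; its worker thread is expected to call
 * util_queue_fence_signal(fence) once the work is done. */
void launch_work(struct util_queue_fence *fence);

static void
run_and_wait(struct util_queue_fence *fence)
{
   /* Legal only while no other thread can be waiting on the fence. */
   util_queue_fence_reset(fence);

   launch_work(fence);

   /* Blocks until the worker signals the fence. */
   util_queue_fence_wait(fence);
}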