+   if (pending_fences && !signaled_fences) {
+      /* If we've hit this then someone decided to vkWaitForFences before
+       * they've actually submitted any of them to a queue. This is a
+       * fairly pessimal case, so it's ok to lock here and use a standard
+       * pthreads condition variable.
+       */
+      pthread_mutex_lock(&device->mutex);
+
+      /* It's possible that some of the fences have changed state since the
+       * last time we checked. Now that we have the lock, check for
+       * pending fences again and skip the wait if the count has changed.
+       */
+      uint32_t now_pending_fences = 0;
+      for (uint32_t i = 0; i < fenceCount; i++) {
+         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
+         if (fence->state == ANV_FENCE_STATE_RESET)
+            now_pending_fences++;
+      }
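+      /* Between the unlocked check above and here, fences should only
+       * have left the RESET state (via queue submission), so the number
+       * of pending fences can only have decreased.
+       */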
+      assert(now_pending_fences <= pending_fences);
+
+      if (now_pending_fences == pending_fences) {
+         struct timespec before;
+         clock_gettime(CLOCK_MONOTONIC, &before);
+
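+         /* Turn the relative timeout into an absolute deadline: peel off
+          * whole seconds and leftover nanoseconds separately, then fold
+          * any nanosecond overflow in abs_nsec back into the seconds
+          * before reducing it modulo NSEC_PER_SEC below.
+          */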
+         uint32_t abs_nsec = before.tv_nsec + timeout % NSEC_PER_SEC;
+         uint64_t abs_sec = before.tv_sec + (abs_nsec / NSEC_PER_SEC) +
+                            (timeout / NSEC_PER_SEC);
+         abs_nsec %= NSEC_PER_SEC;
+
+         /* Avoid roll-over in tv_sec on 32-bit systems if the
+          * user-provided timeout is UINT64_MAX
+          */
+         struct timespec abstime;
+         abstime.tv_nsec = abs_nsec;
+         abstime.tv_sec = MIN2(abs_sec, INT_TYPE_MAX(abstime.tv_sec));
+
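+         /* This relies on device->queue_submit having been created with
+          * pthread_condattr_setclock(CLOCK_MONOTONIC); by default,
+          * pthread_cond_timedwait interprets abstime against
+          * CLOCK_REALTIME, which would not match the deadline computed
+          * above.
+          */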
+         ret = pthread_cond_timedwait(&device->queue_submit,
+                                      &device->mutex, &abstime);
+         assert(ret != EINVAL);
+
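+         /* pthread_cond_timedwait can return zero on a spurious wakeup
+          * as well as on a real queue submission, and ETIMEDOUT on
+          * timeout; in every case, measure how long we actually slept so
+          * the remaining timeout can be adjusted below.
+          */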
+         struct timespec after;
+         clock_gettime(CLOCK_MONOTONIC, &after);
+         uint64_t time_elapsed =
+            ((uint64_t)after.tv_sec * NSEC_PER_SEC + after.tv_nsec) -
+            ((uint64_t)before.tv_sec * NSEC_PER_SEC + before.tv_nsec);
+
+         if (time_elapsed >= timeout) {
+            pthread_mutex_unlock(&device->mutex);
+            return VK_TIMEOUT;
+         }
+
+         timeout -= time_elapsed;
+      }