/*
 * Copyright © 2019 Google LLC
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "tu_private.h"

#include <errno.h>
#include <fcntl.h>
#include <libsync.h>
#include <limits.h>
#include <poll.h>
#include <unistd.h>

#include "util/os_file.h"
#include "util/os_time.h"
/**
 * Internally, a fence can be in one of these states.
 */
enum tu_fence_state
{
   TU_FENCE_STATE_RESET,      /* not signaled, no sync fd attached */
   TU_FENCE_STATE_PENDING,    /* waiting on a valid sync fd */
   TU_FENCE_STATE_SIGNALED,   /* signaled, sync fd already consumed */
};
43 static enum tu_fence_state
44 tu_fence_get_state(const struct tu_fence
*fence
)
47 assert(fence
->fd
< 0);
50 return TU_FENCE_STATE_SIGNALED
;
51 else if (fence
->fd
>= 0)
52 return TU_FENCE_STATE_PENDING
;
54 return TU_FENCE_STATE_RESET
;
58 tu_fence_set_state(struct tu_fence
*fence
, enum tu_fence_state state
, int fd
)
64 case TU_FENCE_STATE_RESET
:
66 fence
->signaled
= false;
69 case TU_FENCE_STATE_PENDING
:
71 fence
->signaled
= false;
74 case TU_FENCE_STATE_SIGNALED
:
76 fence
->signaled
= true;
80 unreachable("unknown fence state");
86 tu_fence_init(struct tu_fence
*fence
, bool signaled
)
88 fence
->signaled
= signaled
;
90 fence
->fence_wsi
= NULL
;
94 tu_fence_finish(struct tu_fence
*fence
)
99 fence
->fence_wsi
->destroy(fence
->fence_wsi
);
103 * Update the associated fd of a fence. Ownership of \a fd is transferred to
106 * This function does not block. \a fence can also be in any state when this
107 * function is called. To be able to do that, the caller must make sure that,
108 * when both the currently associated fd and the new fd are valid, they are on
109 * the same timeline with the new fd being later on the timeline.
112 tu_fence_update_fd(struct tu_fence
*fence
, int fd
)
114 const enum tu_fence_state state
=
115 fd
>= 0 ? TU_FENCE_STATE_PENDING
: TU_FENCE_STATE_SIGNALED
;
116 tu_fence_set_state(fence
, state
, fd
);
120 * Make a fence a copy of another fence. \a fence must be in the reset state.
123 tu_fence_copy(struct tu_fence
*fence
, const struct tu_fence
*src
)
125 assert(tu_fence_get_state(fence
) == TU_FENCE_STATE_RESET
);
130 fd
= os_dupfd_cloexec(src
->fd
);
132 tu_loge("failed to dup fd %d for fence", src
->fd
);
133 sync_wait(src
->fd
, -1);
137 tu_fence_update_fd(fence
, fd
);
141 * Signal a fence. \a fence must be in the reset state.
144 tu_fence_signal(struct tu_fence
*fence
)
146 assert(tu_fence_get_state(fence
) == TU_FENCE_STATE_RESET
);
147 tu_fence_set_state(fence
, TU_FENCE_STATE_SIGNALED
, -1);
151 * Wait until a fence is idle (i.e., not pending).
154 tu_fence_wait_idle(struct tu_fence
*fence
)
156 if (fence
->fd
>= 0) {
157 if (sync_wait(fence
->fd
, -1))
158 tu_loge("sync_wait on fence fd %d failed", fence
->fd
);
160 tu_fence_set_state(fence
, TU_FENCE_STATE_SIGNALED
, -1);
165 tu_CreateFence(VkDevice _device
,
166 const VkFenceCreateInfo
*pCreateInfo
,
167 const VkAllocationCallbacks
*pAllocator
,
170 TU_FROM_HANDLE(tu_device
, device
, _device
);
172 struct tu_fence
*fence
=
173 vk_alloc2(&device
->alloc
, pAllocator
, sizeof(*fence
), 8,
174 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
177 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
179 tu_fence_init(fence
, pCreateInfo
->flags
& VK_FENCE_CREATE_SIGNALED_BIT
);
181 *pFence
= tu_fence_to_handle(fence
);
187 tu_DestroyFence(VkDevice _device
,
189 const VkAllocationCallbacks
*pAllocator
)
191 TU_FROM_HANDLE(tu_device
, device
, _device
);
192 TU_FROM_HANDLE(tu_fence
, fence
, _fence
);
197 tu_fence_finish(fence
);
199 vk_free2(&device
->alloc
, pAllocator
, fence
);
203 * Initialize a pollfd array from fences.
206 tu_fence_init_poll_fds(uint32_t fence_count
,
207 const VkFence
*fences
,
212 for (uint32_t i
= 0; i
< fence_count
; i
++) {
213 TU_FROM_HANDLE(tu_fence
, fence
, fences
[i
]);
215 /* skip wsi fences */
216 if (fence
->fence_wsi
)
219 if (fence
->signaled
) {
221 /* skip signaled fences */
224 /* no need to poll any fd */
230 /* negative fds are never ready, which is the desired behavior */
231 fds
[nfds
].fd
= fence
->fd
;
232 fds
[nfds
].events
= POLLIN
;
233 fds
[nfds
].revents
= 0;
/**
 * Translate timeout from nanoseconds to milliseconds for poll().
 * Rounds to the nearest millisecond and clamps the result to INT_MAX.
 */
static int
tu_fence_get_poll_timeout(uint64_t timeout_ns)
{
   const uint64_t ns_per_ms = 1000 * 1000;
   uint64_t timeout_ms = timeout_ns / ns_per_ms;

   /* round up if needed */
   if (timeout_ns - timeout_ms * ns_per_ms >= ns_per_ms / 2)
      timeout_ms++;

   return timeout_ms < INT_MAX ? timeout_ms : INT_MAX;
}
257 * Poll a pollfd array.
260 tu_fence_poll_fds(struct pollfd
*fds
, nfds_t nfds
, uint64_t *timeout_ns
)
264 uint64_t duration
= os_time_get_nano();
265 int ret
= poll(fds
, nfds
, tu_fence_get_poll_timeout(*timeout_ns
));
266 duration
= os_time_get_nano() - duration
;
268 /* update timeout_ns */
269 if (*timeout_ns
> duration
)
270 *timeout_ns
-= duration
;
276 } else if (ret
== 0) {
279 } else if (errno
!= EINTR
&& errno
!= EAGAIN
) {
280 return VK_ERROR_OUT_OF_HOST_MEMORY
;
286 * Update a pollfd array and the fence states. This should be called after a
287 * successful call to tu_fence_poll_fds.
290 tu_fence_update_fences_and_poll_fds(uint32_t fence_count
,
291 const VkFence
*fences
,
296 uint32_t fds_idx
= 0;
297 for (uint32_t i
= 0; i
< fence_count
; i
++) {
298 TU_FROM_HANDLE(tu_fence
, fence
, fences
[i
]);
300 /* skip wsi fences */
301 if (fence
->fence_wsi
)
304 /* no signaled fence in fds */
308 /* fds[fds_idx] corresponds to fences[i] */
309 assert(fence
->fd
== fds
[fds_idx
].fd
);
311 assert(nfds
<= fds_idx
&& fds_idx
<= i
);
313 /* fd is ready (errors are treated as ready) */
314 if (fds
[fds_idx
].revents
) {
315 tu_fence_set_state(fence
, TU_FENCE_STATE_SIGNALED
, -1);
316 } else if (wait_all
) {
317 /* add to fds again for another poll */
318 fds
[nfds
].fd
= fence
->fd
;
319 fds
[nfds
].events
= POLLIN
;
320 fds
[nfds
].revents
= 0;
331 tu_WaitForFences(VkDevice _device
,
333 const VkFence
*pFences
,
337 TU_FROM_HANDLE(tu_device
, device
, _device
);
339 /* add a simpler path for when fenceCount == 1? */
341 struct pollfd stack_fds
[8];
342 struct pollfd
*fds
= stack_fds
;
343 if (fenceCount
> ARRAY_SIZE(stack_fds
)) {
344 fds
= vk_alloc(&device
->alloc
, sizeof(*fds
) * fenceCount
, 8,
345 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND
);
347 return VK_ERROR_OUT_OF_HOST_MEMORY
;
350 /* set up pollfd array and start polling */
351 nfds_t nfds
= tu_fence_init_poll_fds(fenceCount
, pFences
, waitAll
, fds
);
352 VkResult result
= VK_SUCCESS
;
354 result
= tu_fence_poll_fds(fds
, nfds
, &timeout
);
355 if (result
!= VK_SUCCESS
)
357 nfds
= tu_fence_update_fences_and_poll_fds(fenceCount
, pFences
, waitAll
,
361 if (fds
!= stack_fds
)
362 vk_free(&device
->alloc
, fds
);
364 if (result
!= VK_SUCCESS
)
367 for (uint32_t i
= 0; i
< fenceCount
; ++i
) {
368 TU_FROM_HANDLE(tu_fence
, fence
, pFences
[i
]);
369 if (fence
->fence_wsi
) {
370 VkResult result
= fence
->fence_wsi
->wait(fence
->fence_wsi
, timeout
);
371 if (result
!= VK_SUCCESS
)
380 tu_ResetFences(VkDevice _device
, uint32_t fenceCount
, const VkFence
*pFences
)
382 for (unsigned i
= 0; i
< fenceCount
; ++i
) {
383 TU_FROM_HANDLE(tu_fence
, fence
, pFences
[i
]);
384 assert(tu_fence_get_state(fence
) != TU_FENCE_STATE_PENDING
);
385 tu_fence_set_state(fence
, TU_FENCE_STATE_RESET
, -1);
392 tu_GetFenceStatus(VkDevice _device
, VkFence _fence
)
394 TU_FROM_HANDLE(tu_fence
, fence
, _fence
);
396 if (fence
->fd
>= 0) {
397 int err
= sync_wait(fence
->fd
, 0);
399 tu_fence_set_state(fence
, TU_FENCE_STATE_SIGNALED
, -1);
400 else if (err
&& errno
!= ETIME
)
401 return VK_ERROR_OUT_OF_HOST_MEMORY
;
403 if (fence
->fence_wsi
) {
404 VkResult result
= fence
->fence_wsi
->wait(fence
->fence_wsi
, 0);
406 if (result
!= VK_SUCCESS
) {
407 if (result
== VK_TIMEOUT
)
413 return fence
->signaled
? VK_SUCCESS
: VK_NOT_READY
;