/*
 * Copyright © 2019 Google LLC
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "tu_private.h"

#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <poll.h>
#include <unistd.h>

#include <libsync.h>

#include "util/os_time.h"
/**
 * Internally, a fence can be in one of these states.
 */
enum tu_fence_state
{
   TU_FENCE_STATE_RESET,    /* no payload: not signaled, no sync fd held */
   TU_FENCE_STATE_PENDING,  /* owns a valid sync fd that has not signaled */
   TU_FENCE_STATE_SIGNALED, /* signaled; no sync fd is held */
};
42 static enum tu_fence_state
43 tu_fence_get_state(const struct tu_fence
*fence
)
46 assert(fence
->fd
< 0);
49 return TU_FENCE_STATE_SIGNALED
;
50 else if (fence
->fd
>= 0)
51 return TU_FENCE_STATE_PENDING
;
53 return TU_FENCE_STATE_RESET
;
57 tu_fence_set_state(struct tu_fence
*fence
, enum tu_fence_state state
, int fd
)
63 case TU_FENCE_STATE_RESET
:
65 fence
->signaled
= false;
68 case TU_FENCE_STATE_PENDING
:
70 fence
->signaled
= false;
73 case TU_FENCE_STATE_SIGNALED
:
75 fence
->signaled
= true;
79 unreachable("unknown fence state");
85 tu_fence_init(struct tu_fence
*fence
, bool signaled
)
87 fence
->signaled
= signaled
;
89 fence
->fence_wsi
= NULL
;
93 tu_fence_finish(struct tu_fence
*fence
)
98 fence
->fence_wsi
->destroy(fence
->fence_wsi
);
102 * Update the associated fd of a fence. Ownership of \a fd is transferred to
105 * This function does not block. \a fence can also be in any state when this
106 * function is called. To be able to do that, the caller must make sure that,
107 * when both the currently associated fd and the new fd are valid, they are on
108 * the same timeline with the new fd being later on the timeline.
111 tu_fence_update_fd(struct tu_fence
*fence
, int fd
)
113 const enum tu_fence_state state
=
114 fd
>= 0 ? TU_FENCE_STATE_PENDING
: TU_FENCE_STATE_SIGNALED
;
115 tu_fence_set_state(fence
, state
, fd
);
119 * Make a fence a copy of another fence. \a fence must be in the reset state.
122 tu_fence_copy(struct tu_fence
*fence
, const struct tu_fence
*src
)
124 assert(tu_fence_get_state(fence
) == TU_FENCE_STATE_RESET
);
129 fd
= fcntl(src
->fd
, F_DUPFD_CLOEXEC
, 0);
131 tu_loge("failed to dup fd %d for fence", src
->fd
);
132 sync_wait(src
->fd
, -1);
136 tu_fence_update_fd(fence
, fd
);
140 * Signal a fence. \a fence must be in the reset state.
143 tu_fence_signal(struct tu_fence
*fence
)
145 assert(tu_fence_get_state(fence
) == TU_FENCE_STATE_RESET
);
146 tu_fence_set_state(fence
, TU_FENCE_STATE_SIGNALED
, -1);
150 * Wait until a fence is idle (i.e., not pending).
153 tu_fence_wait_idle(struct tu_fence
*fence
)
155 if (fence
->fd
>= 0) {
156 if (sync_wait(fence
->fd
, -1))
157 tu_loge("sync_wait on fence fd %d failed", fence
->fd
);
159 tu_fence_set_state(fence
, TU_FENCE_STATE_SIGNALED
, -1);
164 tu_CreateFence(VkDevice _device
,
165 const VkFenceCreateInfo
*pCreateInfo
,
166 const VkAllocationCallbacks
*pAllocator
,
169 TU_FROM_HANDLE(tu_device
, device
, _device
);
171 struct tu_fence
*fence
=
172 vk_alloc2(&device
->alloc
, pAllocator
, sizeof(*fence
), 8,
173 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
176 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
178 tu_fence_init(fence
, pCreateInfo
->flags
& VK_FENCE_CREATE_SIGNALED_BIT
);
180 *pFence
= tu_fence_to_handle(fence
);
186 tu_DestroyFence(VkDevice _device
,
188 const VkAllocationCallbacks
*pAllocator
)
190 TU_FROM_HANDLE(tu_device
, device
, _device
);
191 TU_FROM_HANDLE(tu_fence
, fence
, _fence
);
196 tu_fence_finish(fence
);
198 vk_free2(&device
->alloc
, pAllocator
, fence
);
202 * Initialize a pollfd array from fences.
205 tu_fence_init_poll_fds(uint32_t fence_count
,
206 const VkFence
*fences
,
211 for (uint32_t i
= 0; i
< fence_count
; i
++) {
212 TU_FROM_HANDLE(tu_fence
, fence
, fences
[i
]);
214 /* skip wsi fences */
215 if (fence
->fence_wsi
)
218 if (fence
->signaled
) {
220 /* skip signaled fences */
223 /* no need to poll any fd */
229 /* negative fds are never ready, which is the desired behavior */
230 fds
[nfds
].fd
= fence
->fd
;
231 fds
[nfds
].events
= POLLIN
;
232 fds
[nfds
].revents
= 0;
/**
 * Translate timeout from nanoseconds to milliseconds for poll().
 *
 * Rounds to the nearest millisecond and clamps to INT_MAX, since poll()
 * takes an int timeout.
 */
static int
tu_fence_get_poll_timeout(uint64_t timeout_ns)
{
   const uint64_t ns_per_ms = 1000 * 1000;
   uint64_t timeout_ms = timeout_ns / ns_per_ms;

   /* round up if needed */
   if (timeout_ns - timeout_ms * ns_per_ms >= ns_per_ms / 2)
      timeout_ms++;

   return timeout_ms < INT_MAX ? timeout_ms : INT_MAX;
}
256 * Poll a pollfd array.
259 tu_fence_poll_fds(struct pollfd
*fds
, nfds_t nfds
, uint64_t *timeout_ns
)
263 uint64_t duration
= os_time_get_nano();
264 int ret
= poll(fds
, nfds
, tu_fence_get_poll_timeout(*timeout_ns
));
265 duration
= os_time_get_nano() - duration
;
267 /* update timeout_ns */
268 if (*timeout_ns
> duration
)
269 *timeout_ns
-= duration
;
275 } else if (ret
== 0) {
278 } else if (errno
!= EINTR
&& errno
!= EAGAIN
) {
279 return VK_ERROR_OUT_OF_HOST_MEMORY
;
285 * Update a pollfd array and the fence states. This should be called after a
286 * successful call to tu_fence_poll_fds.
289 tu_fence_update_fences_and_poll_fds(uint32_t fence_count
,
290 const VkFence
*fences
,
295 uint32_t fds_idx
= 0;
296 for (uint32_t i
= 0; i
< fence_count
; i
++) {
297 TU_FROM_HANDLE(tu_fence
, fence
, fences
[i
]);
299 /* skip wsi fences */
300 if (fence
->fence_wsi
)
303 /* no signaled fence in fds */
307 /* fds[fds_idx] corresponds to fences[i] */
308 assert(fence
->fd
== fds
[fds_idx
].fd
);
310 assert(nfds
<= fds_idx
&& fds_idx
<= i
);
312 /* fd is ready (errors are treated as ready) */
313 if (fds
[fds_idx
].revents
) {
314 tu_fence_set_state(fence
, TU_FENCE_STATE_SIGNALED
, -1);
315 } else if (wait_all
) {
316 /* add to fds again for another poll */
317 fds
[nfds
].fd
= fence
->fd
;
318 fds
[nfds
].events
= POLLIN
;
319 fds
[nfds
].revents
= 0;
330 tu_WaitForFences(VkDevice _device
,
332 const VkFence
*pFences
,
336 TU_FROM_HANDLE(tu_device
, device
, _device
);
338 /* add a simpler path for when fenceCount == 1? */
340 struct pollfd stack_fds
[8];
341 struct pollfd
*fds
= stack_fds
;
342 if (fenceCount
> ARRAY_SIZE(stack_fds
)) {
343 fds
= vk_alloc(&device
->alloc
, sizeof(*fds
) * fenceCount
, 8,
344 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND
);
346 return VK_ERROR_OUT_OF_HOST_MEMORY
;
349 /* set up pollfd array and start polling */
350 nfds_t nfds
= tu_fence_init_poll_fds(fenceCount
, pFences
, waitAll
, fds
);
351 VkResult result
= VK_SUCCESS
;
353 result
= tu_fence_poll_fds(fds
, nfds
, &timeout
);
354 if (result
!= VK_SUCCESS
)
356 nfds
= tu_fence_update_fences_and_poll_fds(fenceCount
, pFences
, waitAll
,
360 if (fds
!= stack_fds
)
361 vk_free(&device
->alloc
, fds
);
363 if (result
!= VK_SUCCESS
)
366 for (uint32_t i
= 0; i
< fenceCount
; ++i
) {
367 TU_FROM_HANDLE(tu_fence
, fence
, pFences
[i
]);
368 if (fence
->fence_wsi
) {
369 VkResult result
= fence
->fence_wsi
->wait(fence
->fence_wsi
, timeout
);
370 if (result
!= VK_SUCCESS
)
379 tu_ResetFences(VkDevice _device
, uint32_t fenceCount
, const VkFence
*pFences
)
381 for (unsigned i
= 0; i
< fenceCount
; ++i
) {
382 TU_FROM_HANDLE(tu_fence
, fence
, pFences
[i
]);
383 assert(tu_fence_get_state(fence
) != TU_FENCE_STATE_PENDING
);
384 tu_fence_set_state(fence
, TU_FENCE_STATE_RESET
, -1);
391 tu_GetFenceStatus(VkDevice _device
, VkFence _fence
)
393 TU_FROM_HANDLE(tu_fence
, fence
, _fence
);
395 if (fence
->fd
>= 0) {
396 int err
= sync_wait(fence
->fd
, 0);
398 tu_fence_set_state(fence
, TU_FENCE_STATE_SIGNALED
, -1);
399 else if (err
&& errno
!= ETIME
)
400 return VK_ERROR_OUT_OF_HOST_MEMORY
;
402 if (fence
->fence_wsi
) {
403 VkResult result
= fence
->fence_wsi
->wait(fence
->fence_wsi
, 0);
405 if (result
!= VK_SUCCESS
) {
406 if (result
== VK_TIMEOUT
)
412 return fence
->signaled
? VK_SUCCESS
: VK_NOT_READY
;