/*
 * Copyright © 2019 Google LLC
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "tu_private.h"

#include <errno.h>
#include <fcntl.h>
#include <libsync.h>
#include <limits.h>
#include <poll.h>
#include <unistd.h>

#include "util/os_file.h"
#include "util/os_time.h"
/**
 * Internally, a fence can be in one of these states.
 */
enum tu_fence_state
{
   TU_FENCE_STATE_RESET,      /* not signaled and no payload fd */
   TU_FENCE_STATE_PENDING,    /* waiting on a valid sync-file fd */
   TU_FENCE_STATE_SIGNALED,   /* signaled; fd has been consumed/closed */
};
43 static enum tu_fence_state
44 tu_fence_get_state(const struct tu_fence
*fence
)
47 assert(fence
->fd
< 0);
50 return TU_FENCE_STATE_SIGNALED
;
51 else if (fence
->fd
>= 0)
52 return TU_FENCE_STATE_PENDING
;
54 return TU_FENCE_STATE_RESET
;
58 tu_fence_set_state(struct tu_fence
*fence
, enum tu_fence_state state
, int fd
)
64 case TU_FENCE_STATE_RESET
:
66 fence
->signaled
= false;
69 case TU_FENCE_STATE_PENDING
:
71 fence
->signaled
= false;
74 case TU_FENCE_STATE_SIGNALED
:
76 fence
->signaled
= true;
80 unreachable("unknown fence state");
86 tu_fence_init(struct tu_fence
*fence
, bool signaled
)
88 fence
->signaled
= signaled
;
90 fence
->fence_wsi
= NULL
;
94 tu_fence_finish(struct tu_fence
*fence
)
99 fence
->fence_wsi
->destroy(fence
->fence_wsi
);
103 * Update the associated fd of a fence. Ownership of \a fd is transferred to
106 * This function does not block. \a fence can also be in any state when this
107 * function is called. To be able to do that, the caller must make sure that,
108 * when both the currently associated fd and the new fd are valid, they are on
109 * the same timeline with the new fd being later on the timeline.
112 tu_fence_update_fd(struct tu_fence
*fence
, int fd
)
114 const enum tu_fence_state state
=
115 fd
>= 0 ? TU_FENCE_STATE_PENDING
: TU_FENCE_STATE_SIGNALED
;
116 tu_fence_set_state(fence
, state
, fd
);
120 * Make a fence a copy of another fence. \a fence must be in the reset state.
123 tu_fence_copy(struct tu_fence
*fence
, const struct tu_fence
*src
)
125 assert(tu_fence_get_state(fence
) == TU_FENCE_STATE_RESET
);
130 fd
= os_dupfd_cloexec(src
->fd
);
132 tu_loge("failed to dup fd %d for fence", src
->fd
);
133 sync_wait(src
->fd
, -1);
137 tu_fence_update_fd(fence
, fd
);
141 * Signal a fence. \a fence must be in the reset state.
144 tu_fence_signal(struct tu_fence
*fence
)
146 assert(tu_fence_get_state(fence
) == TU_FENCE_STATE_RESET
);
147 tu_fence_set_state(fence
, TU_FENCE_STATE_SIGNALED
, -1);
151 * Wait until a fence is idle (i.e., not pending).
154 tu_fence_wait_idle(struct tu_fence
*fence
)
156 if (fence
->fd
>= 0) {
157 if (sync_wait(fence
->fd
, -1))
158 tu_loge("sync_wait on fence fd %d failed", fence
->fd
);
160 tu_fence_set_state(fence
, TU_FENCE_STATE_SIGNALED
, -1);
165 tu_CreateFence(VkDevice _device
,
166 const VkFenceCreateInfo
*pCreateInfo
,
167 const VkAllocationCallbacks
*pAllocator
,
170 TU_FROM_HANDLE(tu_device
, device
, _device
);
172 struct tu_fence
*fence
=
173 vk_object_alloc(&device
->vk
, pAllocator
, sizeof(*fence
),
174 VK_OBJECT_TYPE_FENCE
);
176 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
178 tu_fence_init(fence
, pCreateInfo
->flags
& VK_FENCE_CREATE_SIGNALED_BIT
);
180 *pFence
= tu_fence_to_handle(fence
);
186 tu_DestroyFence(VkDevice _device
,
188 const VkAllocationCallbacks
*pAllocator
)
190 TU_FROM_HANDLE(tu_device
, device
, _device
);
191 TU_FROM_HANDLE(tu_fence
, fence
, _fence
);
196 tu_fence_finish(fence
);
198 vk_object_free(&device
->vk
, pAllocator
, fence
);
202 * Initialize a pollfd array from fences.
205 tu_fence_init_poll_fds(uint32_t fence_count
,
206 const VkFence
*fences
,
211 for (uint32_t i
= 0; i
< fence_count
; i
++) {
212 TU_FROM_HANDLE(tu_fence
, fence
, fences
[i
]);
214 /* skip wsi fences */
215 if (fence
->fence_wsi
)
218 if (fence
->signaled
) {
220 /* skip signaled fences */
223 /* no need to poll any fd */
229 /* negative fds are never ready, which is the desired behavior */
230 fds
[nfds
].fd
= fence
->fd
;
231 fds
[nfds
].events
= POLLIN
;
232 fds
[nfds
].revents
= 0;
/**
 * Translate timeout from nanoseconds to milliseconds for poll().
 *
 * Rounds to the nearest millisecond and clamps to INT_MAX, since poll()
 * takes an int timeout.
 */
static int
tu_fence_get_poll_timeout(uint64_t timeout_ns)
{
   const uint64_t ns_per_ms = 1000 * 1000;
   uint64_t timeout_ms = timeout_ns / ns_per_ms;

   /* round up if needed */
   if (timeout_ns - timeout_ms * ns_per_ms >= ns_per_ms / 2)
      timeout_ms++;

   return timeout_ms < INT_MAX ? timeout_ms : INT_MAX;
}
256 * Poll a pollfd array.
259 tu_fence_poll_fds(struct pollfd
*fds
, nfds_t nfds
, uint64_t *timeout_ns
)
263 uint64_t duration
= os_time_get_nano();
264 int ret
= poll(fds
, nfds
, tu_fence_get_poll_timeout(*timeout_ns
));
265 duration
= os_time_get_nano() - duration
;
267 /* update timeout_ns */
268 if (*timeout_ns
> duration
)
269 *timeout_ns
-= duration
;
275 } else if (ret
== 0) {
278 } else if (errno
!= EINTR
&& errno
!= EAGAIN
) {
279 return VK_ERROR_OUT_OF_HOST_MEMORY
;
285 * Update a pollfd array and the fence states. This should be called after a
286 * successful call to tu_fence_poll_fds.
289 tu_fence_update_fences_and_poll_fds(uint32_t fence_count
,
290 const VkFence
*fences
,
295 uint32_t fds_idx
= 0;
296 for (uint32_t i
= 0; i
< fence_count
; i
++) {
297 TU_FROM_HANDLE(tu_fence
, fence
, fences
[i
]);
299 /* skip wsi fences */
300 if (fence
->fence_wsi
)
303 /* no signaled fence in fds */
307 /* fds[fds_idx] corresponds to fences[i] */
308 assert(fence
->fd
== fds
[fds_idx
].fd
);
310 assert(nfds
<= fds_idx
&& fds_idx
<= i
);
312 /* fd is ready (errors are treated as ready) */
313 if (fds
[fds_idx
].revents
) {
314 tu_fence_set_state(fence
, TU_FENCE_STATE_SIGNALED
, -1);
315 } else if (wait_all
) {
316 /* add to fds again for another poll */
317 fds
[nfds
].fd
= fence
->fd
;
318 fds
[nfds
].events
= POLLIN
;
319 fds
[nfds
].revents
= 0;
330 tu_WaitForFences(VkDevice _device
,
332 const VkFence
*pFences
,
336 TU_FROM_HANDLE(tu_device
, device
, _device
);
338 if (tu_device_is_lost(device
))
339 return VK_ERROR_DEVICE_LOST
;
341 /* add a simpler path for when fenceCount == 1? */
343 struct pollfd stack_fds
[8];
344 struct pollfd
*fds
= stack_fds
;
345 if (fenceCount
> ARRAY_SIZE(stack_fds
)) {
346 fds
= vk_alloc(&device
->vk
.alloc
, sizeof(*fds
) * fenceCount
, 8,
347 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND
);
349 return VK_ERROR_OUT_OF_HOST_MEMORY
;
352 /* set up pollfd array and start polling */
353 nfds_t nfds
= tu_fence_init_poll_fds(fenceCount
, pFences
, waitAll
, fds
);
354 VkResult result
= VK_SUCCESS
;
356 result
= tu_fence_poll_fds(fds
, nfds
, &timeout
);
357 if (result
!= VK_SUCCESS
)
359 nfds
= tu_fence_update_fences_and_poll_fds(fenceCount
, pFences
, waitAll
,
363 if (fds
!= stack_fds
)
364 vk_free(&device
->vk
.alloc
, fds
);
366 if (result
!= VK_SUCCESS
)
369 for (uint32_t i
= 0; i
< fenceCount
; ++i
) {
370 TU_FROM_HANDLE(tu_fence
, fence
, pFences
[i
]);
371 if (fence
->fence_wsi
) {
372 VkResult result
= fence
->fence_wsi
->wait(fence
->fence_wsi
, timeout
);
373 if (result
!= VK_SUCCESS
)
382 tu_ResetFences(VkDevice _device
, uint32_t fenceCount
, const VkFence
*pFences
)
384 for (unsigned i
= 0; i
< fenceCount
; ++i
) {
385 TU_FROM_HANDLE(tu_fence
, fence
, pFences
[i
]);
386 assert(tu_fence_get_state(fence
) != TU_FENCE_STATE_PENDING
);
387 tu_fence_set_state(fence
, TU_FENCE_STATE_RESET
, -1);
394 tu_GetFenceStatus(VkDevice _device
, VkFence _fence
)
396 TU_FROM_HANDLE(tu_fence
, fence
, _fence
);
398 if (fence
->fd
>= 0) {
399 int err
= sync_wait(fence
->fd
, 0);
401 tu_fence_set_state(fence
, TU_FENCE_STATE_SIGNALED
, -1);
402 else if (err
&& errno
!= ETIME
)
403 return VK_ERROR_OUT_OF_HOST_MEMORY
;
405 if (fence
->fence_wsi
) {
406 VkResult result
= fence
->fence_wsi
->wait(fence
->fence_wsi
, 0);
408 if (result
!= VK_SUCCESS
) {
409 if (result
== VK_TIMEOUT
)
415 return fence
->signaled
? VK_SUCCESS
: VK_NOT_READY
;