2 * Copyright © 2019 Google LLC
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
#include "tu_private.h"

#include <errno.h>
#include <fcntl.h>
#include <libsync.h>
#include <limits.h>
#include <poll.h>
#include <unistd.h>

#include "util/os_time.h"
/**
 * Internally, a fence can be in one of these states.
 *
 * RESET and SIGNALED are idle states; PENDING means a sync fd is
 * associated with the fence and has not signaled yet.
 */
enum tu_fence_state
{
   TU_FENCE_STATE_RESET,
   TU_FENCE_STATE_PENDING,
   TU_FENCE_STATE_SIGNALED,
};
42 static enum tu_fence_state
43 tu_fence_get_state(const struct tu_fence
*fence
)
46 assert(fence
->fd
< 0);
49 return TU_FENCE_STATE_SIGNALED
;
50 else if (fence
->fd
>= 0)
51 return TU_FENCE_STATE_PENDING
;
53 return TU_FENCE_STATE_RESET
;
57 tu_fence_set_state(struct tu_fence
*fence
, enum tu_fence_state state
, int fd
)
63 case TU_FENCE_STATE_RESET
:
65 fence
->signaled
= false;
68 case TU_FENCE_STATE_PENDING
:
70 fence
->signaled
= false;
73 case TU_FENCE_STATE_SIGNALED
:
75 fence
->signaled
= true;
79 unreachable("unknown fence state");
85 tu_fence_init(struct tu_fence
*fence
, bool signaled
)
87 fence
->signaled
= signaled
;
89 fence
->fence_wsi
= NULL
;
93 tu_fence_finish(struct tu_fence
*fence
)
98 fence
->fence_wsi
->destroy(fence
->fence_wsi
);
102 * Update the associated fd of a fence. Ownership of \a fd is transferred to
105 * This function does not block. \a fence can also be in any state when this
106 * function is called. To be able to do that, the caller must make sure that,
107 * when both the currently associated fd and the new fd are valid, they are on
108 * the same timeline with the new fd being later on the timeline.
111 tu_fence_update_fd(struct tu_fence
*fence
, int fd
)
113 const enum tu_fence_state state
=
114 fd
>= 0 ? TU_FENCE_STATE_PENDING
: TU_FENCE_STATE_SIGNALED
;
115 tu_fence_set_state(fence
, state
, fd
);
119 * Make a fence a copy of another fence. \a fence must be in the reset state.
122 tu_fence_copy(struct tu_fence
*fence
, const struct tu_fence
*src
)
124 assert(tu_fence_get_state(fence
) == TU_FENCE_STATE_RESET
);
129 fd
= fcntl(src
->fd
, F_DUPFD_CLOEXEC
, 0);
131 tu_loge("failed to dup fd %d for fence", src
->fd
);
132 sync_wait(src
->fd
, -1);
136 tu_fence_update_fd(fence
, fd
);
140 * Signal a fence. \a fence must be in the reset state.
143 tu_fence_signal(struct tu_fence
*fence
)
145 assert(tu_fence_get_state(fence
) == TU_FENCE_STATE_RESET
);
146 tu_fence_set_state(fence
, TU_FENCE_STATE_SIGNALED
, -1);
150 * Wait until a fence is idle (i.e., not pending).
153 tu_fence_wait_idle(struct tu_fence
*fence
)
155 if (fence
->fd
>= 0) {
156 if (sync_wait(fence
->fd
, -1))
157 tu_loge("sync_wait on fence fd %d failed", fence
->fd
);
159 tu_fence_set_state(fence
, TU_FENCE_STATE_SIGNALED
, -1);
164 tu_CreateFence(VkDevice _device
,
165 const VkFenceCreateInfo
*pCreateInfo
,
166 const VkAllocationCallbacks
*pAllocator
,
169 TU_FROM_HANDLE(tu_device
, device
, _device
);
171 struct tu_fence
*fence
=
172 vk_alloc2(&device
->alloc
, pAllocator
, sizeof(*fence
), 8,
173 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
176 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
178 tu_fence_init(fence
, pCreateInfo
->flags
& VK_FENCE_CREATE_SIGNALED_BIT
);
180 *pFence
= tu_fence_to_handle(fence
);
186 tu_DestroyFence(VkDevice _device
,
188 const VkAllocationCallbacks
*pAllocator
)
190 TU_FROM_HANDLE(tu_device
, device
, _device
);
191 TU_FROM_HANDLE(tu_fence
, fence
, _fence
);
196 tu_fence_finish(fence
);
198 vk_free2(&device
->alloc
, pAllocator
, fence
);
202 * Initialize a pollfd array from fences.
205 tu_fence_init_poll_fds(uint32_t fence_count
,
206 const VkFence
*fences
,
211 for (uint32_t i
= 0; i
< fence_count
; i
++) {
212 TU_FROM_HANDLE(tu_fence
, fence
, fences
[i
]);
214 if (fence
->signaled
) {
216 /* skip signaled fences */
219 /* no need to poll any fd */
225 /* negative fds are never ready, which is the desired behavior */
226 fds
[nfds
].fd
= fence
->fd
;
227 fds
[nfds
].events
= POLLIN
;
228 fds
[nfds
].revents
= 0;
/**
 * Translate timeout from nanoseconds to milliseconds for poll().
 *
 * Rounds to the nearest millisecond and clamps the result to INT_MAX so it
 * fits poll()'s int timeout parameter.
 */
static int
tu_fence_get_poll_timeout(uint64_t timeout_ns)
{
   const uint64_t ns_per_ms = 1000 * 1000;
   uint64_t timeout_ms = timeout_ns / ns_per_ms;

   /* round up if needed */
   if (timeout_ns - timeout_ms * ns_per_ms >= ns_per_ms / 2)
      timeout_ms++;

   return timeout_ms < INT_MAX ? timeout_ms : INT_MAX;
}
252 * Poll a pollfd array.
255 tu_fence_poll_fds(struct pollfd
*fds
, nfds_t nfds
, uint64_t *timeout_ns
)
259 uint64_t duration
= os_time_get_nano();
260 int ret
= poll(fds
, nfds
, tu_fence_get_poll_timeout(*timeout_ns
));
261 duration
= os_time_get_nano() - duration
;
263 /* update timeout_ns */
264 if (*timeout_ns
> duration
)
265 *timeout_ns
-= duration
;
271 } else if (ret
== 0) {
274 } else if (errno
!= EINTR
&& errno
!= EAGAIN
) {
275 return VK_ERROR_OUT_OF_HOST_MEMORY
;
281 * Update a pollfd array and the fence states. This should be called after a
282 * successful call to tu_fence_poll_fds.
285 tu_fence_update_fences_and_poll_fds(uint32_t fence_count
,
286 const VkFence
*fences
,
291 uint32_t fds_idx
= 0;
292 for (uint32_t i
= 0; i
< fence_count
; i
++) {
293 TU_FROM_HANDLE(tu_fence
, fence
, fences
[i
]);
295 /* no signaled fence in fds */
299 /* fds[fds_idx] corresponds to fences[i] */
300 assert(fence
->fd
== fds
[fds_idx
].fd
);
302 assert(nfds
<= fds_idx
&& fds_idx
<= i
);
304 /* fd is ready (errors are treated as ready) */
305 if (fds
[fds_idx
].revents
) {
306 tu_fence_set_state(fence
, TU_FENCE_STATE_SIGNALED
, -1);
307 } else if (wait_all
) {
308 /* add to fds again for another poll */
309 fds
[nfds
].fd
= fence
->fd
;
310 fds
[nfds
].events
= POLLIN
;
311 fds
[nfds
].revents
= 0;
322 tu_WaitForFences(VkDevice _device
,
324 const VkFence
*pFences
,
328 TU_FROM_HANDLE(tu_device
, device
, _device
);
330 /* add a simpler path for when fenceCount == 1? */
332 struct pollfd stack_fds
[8];
333 struct pollfd
*fds
= stack_fds
;
334 if (fenceCount
> ARRAY_SIZE(stack_fds
)) {
335 fds
= vk_alloc(&device
->alloc
, sizeof(*fds
) * fenceCount
, 8,
336 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND
);
338 return VK_ERROR_OUT_OF_HOST_MEMORY
;
341 /* set up pollfd array and start polling */
342 nfds_t nfds
= tu_fence_init_poll_fds(fenceCount
, pFences
, waitAll
, fds
);
343 VkResult result
= VK_SUCCESS
;
345 result
= tu_fence_poll_fds(fds
, nfds
, &timeout
);
346 if (result
!= VK_SUCCESS
)
348 nfds
= tu_fence_update_fences_and_poll_fds(fenceCount
, pFences
, waitAll
,
352 if (fds
!= stack_fds
)
353 vk_free(&device
->alloc
, fds
);
355 for (uint32_t i
= 0; i
< fenceCount
; ++i
) {
356 TU_FROM_HANDLE(tu_fence
, fence
, pFences
[i
]);
357 if (fence
->fence_wsi
) {
358 VkResult result
= fence
->fence_wsi
->wait(fence
->fence_wsi
, timeout
);
359 if (result
!= VK_SUCCESS
)
368 tu_ResetFences(VkDevice _device
, uint32_t fenceCount
, const VkFence
*pFences
)
370 for (unsigned i
= 0; i
< fenceCount
; ++i
) {
371 TU_FROM_HANDLE(tu_fence
, fence
, pFences
[i
]);
372 assert(tu_fence_get_state(fence
) != TU_FENCE_STATE_PENDING
);
373 tu_fence_set_state(fence
, TU_FENCE_STATE_RESET
, -1);
380 tu_GetFenceStatus(VkDevice _device
, VkFence _fence
)
382 TU_FROM_HANDLE(tu_fence
, fence
, _fence
);
384 if (fence
->fd
>= 0) {
385 int err
= sync_wait(fence
->fd
, 0);
387 tu_fence_set_state(fence
, TU_FENCE_STATE_SIGNALED
, -1);
388 else if (err
&& errno
!= ETIME
)
389 return VK_ERROR_OUT_OF_HOST_MEMORY
;
391 if (fence
->fence_wsi
) {
392 VkResult result
= fence
->fence_wsi
->wait(fence
->fence_wsi
, 0);
394 if (result
!= VK_SUCCESS
) {
395 if (result
== VK_TIMEOUT
)
401 return fence
->signaled
? VK_SUCCESS
: VK_NOT_READY
;