/*
 * Copyright © 2019 Google LLC
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "tu_private.h"

#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <poll.h>
#include <unistd.h>

#include <libsync.h>

#include "util/os_time.h"
/**
 * Internally, a fence can be in one of these states.
 */
enum tu_fence_state
{
   TU_FENCE_STATE_RESET,    /* neither signaled nor associated with an fd */
   TU_FENCE_STATE_PENDING,  /* owns a sync fd that has not signaled yet */
   TU_FENCE_STATE_SIGNALED, /* signaled; no fd is owned */
};
42 static enum tu_fence_state
43 tu_fence_get_state(const struct tu_fence
*fence
)
46 assert(fence
->fd
< 0);
49 return TU_FENCE_STATE_SIGNALED
;
50 else if (fence
->fd
>= 0)
51 return TU_FENCE_STATE_PENDING
;
53 return TU_FENCE_STATE_RESET
;
57 tu_fence_set_state(struct tu_fence
*fence
, enum tu_fence_state state
, int fd
)
63 case TU_FENCE_STATE_RESET
:
65 fence
->signaled
= false;
68 case TU_FENCE_STATE_PENDING
:
70 fence
->signaled
= false;
73 case TU_FENCE_STATE_SIGNALED
:
75 fence
->signaled
= true;
79 unreachable("unknown fence state");
85 tu_fence_init(struct tu_fence
*fence
, bool signaled
)
87 fence
->signaled
= signaled
;
92 tu_fence_finish(struct tu_fence
*fence
)
99 * Update the associated fd of a fence. Ownership of \a fd is transferred to
102 * This function does not block. \a fence can also be in any state when this
103 * function is called. To be able to do that, the caller must make sure that,
104 * when both the currently associated fd and the new fd are valid, they are on
105 * the same timeline with the new fd being later on the timeline.
108 tu_fence_update_fd(struct tu_fence
*fence
, int fd
)
110 const enum tu_fence_state state
=
111 fd
>= 0 ? TU_FENCE_STATE_PENDING
: TU_FENCE_STATE_SIGNALED
;
112 tu_fence_set_state(fence
, state
, fd
);
116 * Make a fence a copy of another fence. \a fence must be in the reset state.
119 tu_fence_copy(struct tu_fence
*fence
, const struct tu_fence
*src
)
121 assert(tu_fence_get_state(fence
) == TU_FENCE_STATE_RESET
);
126 fd
= fcntl(src
->fd
, F_DUPFD_CLOEXEC
, 0);
128 tu_loge("failed to dup fd %d for fence", src
->fd
);
129 sync_wait(src
->fd
, -1);
133 tu_fence_update_fd(fence
, fd
);
137 * Signal a fence. \a fence must be in the reset state.
140 tu_fence_signal(struct tu_fence
*fence
)
142 assert(tu_fence_get_state(fence
) == TU_FENCE_STATE_RESET
);
143 tu_fence_set_state(fence
, TU_FENCE_STATE_SIGNALED
, -1);
147 * Wait until a fence is idle (i.e., not pending).
150 tu_fence_wait_idle(struct tu_fence
*fence
)
152 if (fence
->fd
>= 0) {
153 if (sync_wait(fence
->fd
, -1))
154 tu_loge("sync_wait on fence fd %d failed", fence
->fd
);
156 tu_fence_set_state(fence
, TU_FENCE_STATE_SIGNALED
, -1);
161 tu_CreateFence(VkDevice _device
,
162 const VkFenceCreateInfo
*pCreateInfo
,
163 const VkAllocationCallbacks
*pAllocator
,
166 TU_FROM_HANDLE(tu_device
, device
, _device
);
168 struct tu_fence
*fence
=
169 vk_alloc2(&device
->alloc
, pAllocator
, sizeof(*fence
), 8,
170 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
173 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
175 tu_fence_init(fence
, pCreateInfo
->flags
& VK_FENCE_CREATE_SIGNALED_BIT
);
177 *pFence
= tu_fence_to_handle(fence
);
183 tu_DestroyFence(VkDevice _device
,
185 const VkAllocationCallbacks
*pAllocator
)
187 TU_FROM_HANDLE(tu_device
, device
, _device
);
188 TU_FROM_HANDLE(tu_fence
, fence
, _fence
);
193 tu_fence_finish(fence
);
195 vk_free2(&device
->alloc
, pAllocator
, fence
);
199 * Initialize a pollfd array from fences.
202 tu_fence_init_poll_fds(uint32_t fence_count
,
203 const VkFence
*fences
,
208 for (uint32_t i
= 0; i
< fence_count
; i
++) {
209 TU_FROM_HANDLE(tu_fence
, fence
, fences
[i
]);
211 if (fence
->signaled
) {
213 /* skip signaled fences */
216 /* no need to poll any fd */
222 /* negative fds are never ready, which is the desired behavior */
223 fds
[nfds
].fd
= fence
->fd
;
224 fds
[nfds
].events
= POLLIN
;
225 fds
[nfds
].revents
= 0;
/**
 * Translate timeout from nanoseconds to milliseconds for poll().
 *
 * Rounds to the nearest millisecond and clamps to INT_MAX, since poll()
 * takes an int timeout.
 */
static int
tu_fence_get_poll_timeout(uint64_t timeout_ns)
{
   const uint64_t ns_per_ms = 1000 * 1000;
   uint64_t timeout_ms = timeout_ns / ns_per_ms;

   /* round up if needed */
   if (timeout_ns - timeout_ms * ns_per_ms >= ns_per_ms / 2)
      timeout_ms++;

   return timeout_ms < INT_MAX ? timeout_ms : INT_MAX;
}
249 * Poll a pollfd array.
252 tu_fence_poll_fds(struct pollfd
*fds
, nfds_t nfds
, uint64_t *timeout_ns
)
256 uint64_t duration
= os_time_get_nano();
257 int ret
= poll(fds
, nfds
, tu_fence_get_poll_timeout(*timeout_ns
));
258 duration
= os_time_get_nano() - duration
;
260 /* update timeout_ns */
261 if (*timeout_ns
> duration
)
262 *timeout_ns
-= duration
;
268 } else if (ret
== 0) {
271 } else if (errno
!= EINTR
&& errno
!= EAGAIN
) {
272 return VK_ERROR_OUT_OF_HOST_MEMORY
;
278 * Update a pollfd array and the fence states. This should be called after a
279 * successful call to tu_fence_poll_fds.
282 tu_fence_update_fences_and_poll_fds(uint32_t fence_count
,
283 const VkFence
*fences
,
288 uint32_t fds_idx
= 0;
289 for (uint32_t i
= 0; i
< fence_count
; i
++) {
290 TU_FROM_HANDLE(tu_fence
, fence
, fences
[i
]);
292 /* no signaled fence in fds */
296 /* fds[fds_idx] corresponds to fences[i] */
297 assert(fence
->fd
== fds
[fds_idx
].fd
);
299 assert(nfds
<= fds_idx
&& fds_idx
<= i
);
301 /* fd is ready (errors are treated as ready) */
302 if (fds
[fds_idx
].revents
) {
303 tu_fence_set_state(fence
, TU_FENCE_STATE_SIGNALED
, -1);
304 } else if (wait_all
) {
305 /* add to fds again for another poll */
306 fds
[nfds
].fd
= fence
->fd
;
307 fds
[nfds
].events
= POLLIN
;
308 fds
[nfds
].revents
= 0;
319 tu_WaitForFences(VkDevice _device
,
321 const VkFence
*pFences
,
325 TU_FROM_HANDLE(tu_device
, device
, _device
);
327 /* add a simpler path for when fenceCount == 1? */
329 struct pollfd stack_fds
[8];
330 struct pollfd
*fds
= stack_fds
;
331 if (fenceCount
> ARRAY_SIZE(stack_fds
)) {
332 fds
= vk_alloc(&device
->alloc
, sizeof(*fds
) * fenceCount
, 8,
333 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND
);
335 return VK_ERROR_OUT_OF_HOST_MEMORY
;
338 /* set up pollfd array and start polling */
339 nfds_t nfds
= tu_fence_init_poll_fds(fenceCount
, pFences
, waitAll
, fds
);
340 VkResult result
= VK_SUCCESS
;
342 result
= tu_fence_poll_fds(fds
, nfds
, &timeout
);
343 if (result
!= VK_SUCCESS
)
345 nfds
= tu_fence_update_fences_and_poll_fds(fenceCount
, pFences
, waitAll
,
349 if (fds
!= stack_fds
)
350 vk_free(&device
->alloc
, fds
);
356 tu_ResetFences(VkDevice _device
, uint32_t fenceCount
, const VkFence
*pFences
)
358 for (unsigned i
= 0; i
< fenceCount
; ++i
) {
359 TU_FROM_HANDLE(tu_fence
, fence
, pFences
[i
]);
360 assert(tu_fence_get_state(fence
) != TU_FENCE_STATE_PENDING
);
361 tu_fence_set_state(fence
, TU_FENCE_STATE_RESET
, -1);
368 tu_GetFenceStatus(VkDevice _device
, VkFence _fence
)
370 TU_FROM_HANDLE(tu_fence
, fence
, _fence
);
372 if (fence
->fd
>= 0) {
373 int err
= sync_wait(fence
->fd
, 0);
375 tu_fence_set_state(fence
, TU_FENCE_STATE_SIGNALED
, -1);
376 else if (err
&& errno
!= ETIME
)
377 return VK_ERROR_OUT_OF_HOST_MEMORY
;
380 return fence
->signaled
? VK_SUCCESS
: VK_NOT_READY
;