/*
 * Copyright © 2019 Google LLC
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
23
24 #include "tu_private.h"
25
26 #include <fcntl.h>
27 #include <libsync.h>
28 #include <unistd.h>
29
30 #include "util/os_file.h"
31 #include "util/os_time.h"
32
33 /**
34 * Internally, a fence can be in one of these states.
35 */
36 enum tu_fence_state
37 {
38 TU_FENCE_STATE_RESET,
39 TU_FENCE_STATE_PENDING,
40 TU_FENCE_STATE_SIGNALED,
41 };
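
/*
 * For reference, the states map onto the (signaled, fd) pair as follows:
 *
 *    TU_FENCE_STATE_RESET:    signaled == false, fd == -1
 *    TU_FENCE_STATE_PENDING:  signaled == false, fd >= 0
 *    TU_FENCE_STATE_SIGNALED: signaled == true,  fd == -1
 *
 * tu_fence_get_state() derives the state from the pair and
 * tu_fence_set_state() maintains the invariant.
 */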

static enum tu_fence_state
tu_fence_get_state(const struct tu_fence *fence)
{
   if (fence->signaled)
      assert(fence->fd < 0);

   if (fence->signaled)
      return TU_FENCE_STATE_SIGNALED;
   else if (fence->fd >= 0)
      return TU_FENCE_STATE_PENDING;
   else
      return TU_FENCE_STATE_RESET;
}

static void
tu_fence_set_state(struct tu_fence *fence, enum tu_fence_state state, int fd)
{
   if (fence->fd >= 0)
      close(fence->fd);

   switch (state) {
   case TU_FENCE_STATE_RESET:
      assert(fd < 0);
      fence->signaled = false;
      fence->fd = -1;
      break;
   case TU_FENCE_STATE_PENDING:
      assert(fd >= 0);
      fence->signaled = false;
      fence->fd = fd;
      break;
   case TU_FENCE_STATE_SIGNALED:
      assert(fd < 0);
      fence->signaled = true;
      fence->fd = -1;
      break;
   default:
      unreachable("unknown fence state");
      break;
   }
}

void
tu_fence_init(struct tu_fence *fence, bool signaled)
{
   fence->signaled = signaled;
   fence->fd = -1;
   fence->fence_wsi = NULL;
}

void
tu_fence_finish(struct tu_fence *fence)
{
   if (fence->fd >= 0)
      close(fence->fd);
   if (fence->fence_wsi)
      fence->fence_wsi->destroy(fence->fence_wsi);
}

/**
 * Update the fd associated with a fence. Ownership of \a fd is transferred
 * to \a fence.
 *
 * This function never blocks, and \a fence may be in any state when it is
 * called. For that to be safe, the caller must guarantee that, whenever both
 * the currently associated fd and the new fd are valid, they are on the same
 * timeline with the new fd signaling later.
 */
void
tu_fence_update_fd(struct tu_fence *fence, int fd)
{
   const enum tu_fence_state state =
      fd >= 0 ? TU_FENCE_STATE_PENDING : TU_FENCE_STATE_SIGNALED;
   tu_fence_set_state(fence, state, fd);
}
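
/*
 * A sketch of the intended use, assuming a submit path that produces an
 * out-fence fd (the fd source here is illustrative, not an actual API):
 *
 *    int out_fd = ...;  // sync fd returned by the kernel for a submit
 *    tu_fence_update_fd(fence, out_fd);  // fence becomes pending
 *
 * Passing a negative fd instead moves the fence to the signaled state.
 */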

/**
 * Make a fence a copy of another fence. \a fence must be in the reset state.
 */
void
tu_fence_copy(struct tu_fence *fence, const struct tu_fence *src)
{
   assert(tu_fence_get_state(fence) == TU_FENCE_STATE_RESET);

   /* dup src->fd */
   int fd = -1;
   if (src->fd >= 0) {
      fd = os_dupfd_cloexec(src->fd);
      if (fd < 0) {
         tu_loge("failed to dup fd %d for fence", src->fd);
         /* the dup failed; wait on src so that treating the copy as
          * signaled below is correct
          */
         sync_wait(src->fd, -1);
      }
   }

   tu_fence_update_fd(fence, fd);
}

/**
 * Signal a fence. \a fence must be in the reset state.
 */
void
tu_fence_signal(struct tu_fence *fence)
{
   assert(tu_fence_get_state(fence) == TU_FENCE_STATE_RESET);
   tu_fence_set_state(fence, TU_FENCE_STATE_SIGNALED, -1);
}

/**
 * Wait until a fence is idle (i.e., not pending).
 */
void
tu_fence_wait_idle(struct tu_fence *fence)
{
   if (fence->fd >= 0) {
      if (sync_wait(fence->fd, -1))
         tu_loge("sync_wait on fence fd %d failed", fence->fd);

      tu_fence_set_state(fence, TU_FENCE_STATE_SIGNALED, -1);
   }
}

VkResult
tu_CreateFence(VkDevice _device,
               const VkFenceCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkFence *pFence)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_fence *fence =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!fence)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   tu_fence_init(fence, pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT);

   *pFence = tu_fence_to_handle(fence);

   return VK_SUCCESS;
}

void
tu_DestroyFence(VkDevice _device,
                VkFence _fence,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_fence, fence, _fence);

   if (!fence)
      return;

   tu_fence_finish(fence);

   vk_free2(&device->alloc, pAllocator, fence);
}

/**
 * Initialize a pollfd array from fences.
 */
static nfds_t
tu_fence_init_poll_fds(uint32_t fence_count,
                       const VkFence *fences,
                       bool wait_all,
                       struct pollfd *fds)
{
   nfds_t nfds = 0;
   for (uint32_t i = 0; i < fence_count; i++) {
      TU_FROM_HANDLE(tu_fence, fence, fences[i]);

      /* skip wsi fences */
      if (fence->fence_wsi)
         continue;

      if (fence->signaled) {
         if (wait_all) {
            /* skip signaled fences */
            continue;
         } else {
            /* no need to poll any fd */
            nfds = 0;
            break;
         }
      }

      /* negative fds are never ready, which is the desired behavior */
      fds[nfds].fd = fence->fd;
      fds[nfds].events = POLLIN;
      fds[nfds].revents = 0;
      nfds++;
   }

   return nfds;
}
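
/*
 * A zero return value means there is nothing to poll: either every fence
 * was skipped (wsi fences, or already-signaled fences under wait_all), or a
 * signaled fence short-circuited a wait-any. For example, with
 * wait_all == false and fences { pending, signaled }, the loop above breaks
 * out with nfds == 0 and the caller can succeed without polling at all.
 */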

/**
 * Translate a timeout from nanoseconds to milliseconds for poll().
 */
static int
tu_fence_get_poll_timeout(uint64_t timeout_ns)
{
   const uint64_t ns_per_ms = 1000 * 1000;
   uint64_t timeout_ms = timeout_ns / ns_per_ms;

   /* round to the nearest millisecond (half up) */
   if (timeout_ns - timeout_ms * ns_per_ms >= ns_per_ms / 2)
      timeout_ms++;

   return timeout_ms < INT_MAX ? timeout_ms : INT_MAX;
}
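
/*
 * Worked example: timeout_ns == 1600000 gives timeout_ms == 1 with a
 * remainder of 600000 ns; the remainder is at least half a millisecond, so
 * the result is rounded to 2 ms. When rounding down makes poll() return a
 * bit early instead, the caller's loop simply polls again with the
 * remaining time.
 */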

/**
 * Poll a pollfd array.
 */
static VkResult
tu_fence_poll_fds(struct pollfd *fds, nfds_t nfds, uint64_t *timeout_ns)
{
   while (true) {
      /* poll, measuring how long we actually wait */
      uint64_t duration = os_time_get_nano();
      int ret = poll(fds, nfds, tu_fence_get_poll_timeout(*timeout_ns));
      duration = os_time_get_nano() - duration;

      /* deduct the time waited from the remaining timeout */
      if (*timeout_ns > duration)
         *timeout_ns -= duration;
      else
         *timeout_ns = 0;

      if (ret > 0) {
         return VK_SUCCESS;
      } else if (ret == 0) {
         if (!*timeout_ns)
            return VK_TIMEOUT;
      } else if (errno != EINTR && errno != EAGAIN) {
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      }
   }
}
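
/*
 * Note that EINTR and EAGAIN restart the poll with the remaining timeout,
 * so a signal delivered mid-wait does not extend the total wait beyond
 * (approximately) the requested timeout_ns.
 */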

/**
 * Update a pollfd array and the fence states. This should be called after a
 * successful call to tu_fence_poll_fds.
 */
static nfds_t
tu_fence_update_fences_and_poll_fds(uint32_t fence_count,
                                    const VkFence *fences,
                                    bool wait_all,
                                    struct pollfd *fds)
{
   uint32_t nfds = 0;
   uint32_t fds_idx = 0;
   for (uint32_t i = 0; i < fence_count; i++) {
      TU_FROM_HANDLE(tu_fence, fence, fences[i]);

      /* skip wsi fences */
      if (fence->fence_wsi)
         continue;

      /* signaled fences are not in fds */
      if (fence->signaled)
         continue;

      /* fds[fds_idx] corresponds to fences[i] */
      assert(fence->fd == fds[fds_idx].fd);

      assert(nfds <= fds_idx && fds_idx <= i);

      /* fd is ready (errors are treated as ready) */
      if (fds[fds_idx].revents) {
         tu_fence_set_state(fence, TU_FENCE_STATE_SIGNALED, -1);
      } else if (wait_all) {
         /* add to fds again for another poll */
         fds[nfds].fd = fence->fd;
         fds[nfds].events = POLLIN;
         fds[nfds].revents = 0;
         nfds++;
      }

      fds_idx++;
   }

   return nfds;
}
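
/*
 * The pollfd array is compacted in place. For example, with
 * wait_all == true and three pending fences in fds[0..2], if only fds[1]
 * became ready, the fences behind fds[0] and fds[2] are re-added as fds[0]
 * and fds[1], and 2 is returned for the next poll. This is safe because
 * nfds never exceeds fds_idx, so a slot is only overwritten after it has
 * been consumed.
 */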

VkResult
tu_WaitForFences(VkDevice _device,
                 uint32_t fenceCount,
                 const VkFence *pFences,
                 VkBool32 waitAll,
                 uint64_t timeout)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   /* TODO: add a simpler path for when fenceCount == 1? */

   struct pollfd stack_fds[8];
   struct pollfd *fds = stack_fds;
   if (fenceCount > ARRAY_SIZE(stack_fds)) {
      fds = vk_alloc(&device->alloc, sizeof(*fds) * fenceCount, 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
      if (!fds)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   /* set up the pollfd array and start polling */
   nfds_t nfds = tu_fence_init_poll_fds(fenceCount, pFences, waitAll, fds);
   VkResult result = VK_SUCCESS;
   while (nfds) {
      result = tu_fence_poll_fds(fds, nfds, &timeout);
      if (result != VK_SUCCESS)
         break;
      nfds = tu_fence_update_fences_and_poll_fds(fenceCount, pFences, waitAll,
                                                 fds);
   }

   if (fds != stack_fds)
      vk_free(&device->alloc, fds);

   if (result != VK_SUCCESS)
      return result;

   /* wait on any wsi fences with whatever timeout remains */
   for (uint32_t i = 0; i < fenceCount; ++i) {
      TU_FROM_HANDLE(tu_fence, fence, pFences[i]);
      if (fence->fence_wsi) {
         VkResult result = fence->fence_wsi->wait(fence->fence_wsi, timeout);
         if (result != VK_SUCCESS)
            return result;
      }
   }

   return result;
}

VkResult
tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
{
   for (unsigned i = 0; i < fenceCount; ++i) {
      TU_FROM_HANDLE(tu_fence, fence, pFences[i]);
      assert(tu_fence_get_state(fence) != TU_FENCE_STATE_PENDING);
      tu_fence_set_state(fence, TU_FENCE_STATE_RESET, -1);
   }

   return VK_SUCCESS;
}

VkResult
tu_GetFenceStatus(VkDevice _device, VkFence _fence)
{
   TU_FROM_HANDLE(tu_fence, fence, _fence);

   if (fence->fd >= 0) {
      /* sync_wait with a zero timeout is a non-blocking status check */
      int err = sync_wait(fence->fd, 0);
      if (!err)
         tu_fence_set_state(fence, TU_FENCE_STATE_SIGNALED, -1);
      else if (err && errno != ETIME)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
   }
   if (fence->fence_wsi) {
      VkResult result = fence->fence_wsi->wait(fence->fence_wsi, 0);

      if (result != VK_SUCCESS) {
         if (result == VK_TIMEOUT)
            return VK_NOT_READY;
         return result;
      }
   }

   return fence->signaled ? VK_SUCCESS : VK_NOT_READY;
}