/* src/freedreno/vulkan/tu_fence.c */
/*
 * Copyright © 2019 Google LLC
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include <errno.h>
#include <fcntl.h>
#include <libsync.h>
#include <limits.h>
#include <poll.h>
#include <unistd.h>

#include "util/os_time.h"

/**
 * Internally, a fence can be in one of these states.
 */
enum tu_fence_state
{
   TU_FENCE_STATE_RESET,
   TU_FENCE_STATE_PENDING,
   TU_FENCE_STATE_SIGNALED,
};
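
/*
 * The state is encoded by the (signaled, fd) pair:
 *
 *    RESET:     signaled == false, fd < 0   (no payload yet)
 *    PENDING:   signaled == false, fd >= 0  (waiting on a sync fd)
 *    SIGNALED:  signaled == true,  fd < 0   (payload has signaled)
 *
 * tu_fence_get_state() and tu_fence_set_state() below maintain this
 * invariant.
 */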

static enum tu_fence_state
tu_fence_get_state(const struct tu_fence *fence)
{
   if (fence->signaled) {
      assert(fence->fd < 0);
      return TU_FENCE_STATE_SIGNALED;
   } else if (fence->fd >= 0) {
      return TU_FENCE_STATE_PENDING;
   } else {
      return TU_FENCE_STATE_RESET;
   }
}

static void
tu_fence_set_state(struct tu_fence *fence, enum tu_fence_state state, int fd)
{
   if (fence->fd >= 0)
      close(fence->fd);

   switch (state) {
   case TU_FENCE_STATE_RESET:
      assert(fd < 0);
      fence->signaled = false;
      fence->fd = -1;
      break;
   case TU_FENCE_STATE_PENDING:
      assert(fd >= 0);
      fence->signaled = false;
      fence->fd = fd;
      break;
   case TU_FENCE_STATE_SIGNALED:
      assert(fd < 0);
      fence->signaled = true;
      fence->fd = -1;
      break;
   default:
      unreachable("unknown fence state");
      break;
   }
}

void
tu_fence_init(struct tu_fence *fence, bool signaled)
{
   fence->signaled = signaled;
   fence->fd = -1;
   fence->fence_wsi = NULL;
}

void
tu_fence_finish(struct tu_fence *fence)
{
   if (fence->fd >= 0)
      close(fence->fd);
   if (fence->fence_wsi)
      fence->fence_wsi->destroy(fence->fence_wsi);
}

/**
 * Update the associated fd of a fence.  Ownership of \a fd is transferred
 * to \a fence.
 *
 * This function does not block; \a fence may be in any state when it is
 * called.  For that to be safe, the caller must guarantee that, when both
 * the currently associated fd and the new fd are valid, they are on the
 * same timeline, with the new fd being the later of the two.
 */
void
tu_fence_update_fd(struct tu_fence *fence, int fd)
{
   const enum tu_fence_state state =
      fd >= 0 ? TU_FENCE_STATE_PENDING : TU_FENCE_STATE_SIGNALED;
   tu_fence_set_state(fence, state, fd);
}
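
/*
 * For example (hypothetical call site, not part of this file): a queue
 * submission that produces a new sync fd can hand it straight to a fence
 * that still holds an older pending fd, as long as both fds come from the
 * same timeline:
 *
 *    int fd = ...;                   // sync fd from the kernel submission
 *    tu_fence_update_fd(fence, fd);  // the old fd, if any, is closed
 */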

/**
 * Make a fence a copy of another fence.  \a fence must be in the reset
 * state.
 */
void
tu_fence_copy(struct tu_fence *fence, const struct tu_fence *src)
{
   assert(tu_fence_get_state(fence) == TU_FENCE_STATE_RESET);

   /* dup src->fd */
   int fd = -1;
   if (src->fd >= 0) {
      fd = fcntl(src->fd, F_DUPFD_CLOEXEC, 0);
      if (fd < 0) {
         tu_loge("failed to dup fd %d for fence", src->fd);
         sync_wait(src->fd, -1);
      }
   }

   tu_fence_update_fd(fence, fd);
}
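
/*
 * A note on tu_fence_copy()'s fallback: if F_DUPFD_CLOEXEC fails, fd stays
 * -1 and the code blocks on src->fd until it signals.  Passing -1 to
 * tu_fence_update_fd() then marks the copy as signaled, which is safe only
 * because sync_wait() has already observed the payload signal.
 */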

/**
 * Signal a fence.  \a fence must be in the reset state.
 */
void
tu_fence_signal(struct tu_fence *fence)
{
   assert(tu_fence_get_state(fence) == TU_FENCE_STATE_RESET);
   tu_fence_set_state(fence, TU_FENCE_STATE_SIGNALED, -1);
}

/**
 * Wait until a fence is idle (i.e., not pending).
 */
void
tu_fence_wait_idle(struct tu_fence *fence)
{
   if (fence->fd >= 0) {
      if (sync_wait(fence->fd, -1))
         tu_loge("sync_wait on fence fd %d failed", fence->fd);

      tu_fence_set_state(fence, TU_FENCE_STATE_SIGNALED, -1);
   }
}
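
/*
 * Note that tu_fence_wait_idle() moves the fence to the signaled state
 * even when sync_wait() reports an error; the failure is only logged.
 * Either way, the fence is guaranteed not to be pending afterwards.
 */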

VkResult
tu_CreateFence(VkDevice _device,
               const VkFenceCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkFence *pFence)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_fence *fence =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!fence)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   tu_fence_init(fence, pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT);

   *pFence = tu_fence_to_handle(fence);

   return VK_SUCCESS;
}

void
tu_DestroyFence(VkDevice _device,
                VkFence _fence,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_fence, fence, _fence);

   if (!fence)
      return;

   tu_fence_finish(fence);

   vk_free2(&device->alloc, pAllocator, fence);
}

/**
 * Initialize a pollfd array from fences.
 */
static nfds_t
tu_fence_init_poll_fds(uint32_t fence_count,
                       const VkFence *fences,
                       bool wait_all,
                       struct pollfd *fds)
{
   nfds_t nfds = 0;
   for (uint32_t i = 0; i < fence_count; i++) {
      TU_FROM_HANDLE(tu_fence, fence, fences[i]);

      /* skip wsi fences */
      if (fence->fence_wsi)
         continue;

      if (fence->signaled) {
         if (wait_all) {
            /* skip signaled fences */
            continue;
         } else {
            /* no need to poll any fd */
            nfds = 0;
            break;
         }
      }

      /* negative fds are never ready, which is the desired behavior */
      fds[nfds].fd = fence->fd;
      fds[nfds].events = POLLIN;
      fds[nfds].revents = 0;
      nfds++;
   }

   return nfds;
}

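/*
 * The "negative fds" case above covers fences in the reset state: POSIX
 * poll() ignores entries with fd < 0 and reports revents == 0 for them,
 * so an unsubmitted fence simply never becomes ready and a wait on it
 * runs until the timeout expires.
 */
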
/**
 * Translate timeout from nanoseconds to milliseconds for poll().
 */
static int
tu_fence_get_poll_timeout(uint64_t timeout_ns)
{
   const uint64_t ns_per_ms = 1000 * 1000;
   uint64_t timeout_ms = timeout_ns / ns_per_ms;

   /* round up if needed */
   if (timeout_ns - timeout_ms * ns_per_ms >= ns_per_ms / 2)
      timeout_ms++;

   return timeout_ms < INT_MAX ? timeout_ms : INT_MAX;
}
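
/*
 * For example, a timeout of 1,500,000 ns yields timeout_ms == 1 with a
 * 500,000 ns remainder; the remainder is at least half a millisecond, so
 * the result is rounded to 2 ms.  Anything above INT_MAX ms (roughly 24.8
 * days) is clamped to INT_MAX.
 */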

/**
 * Poll a pollfd array.
 */
static VkResult
tu_fence_poll_fds(struct pollfd *fds, nfds_t nfds, uint64_t *timeout_ns)
{
   while (true) {
      /* poll */
      uint64_t duration = os_time_get_nano();
      int ret = poll(fds, nfds, tu_fence_get_poll_timeout(*timeout_ns));
      duration = os_time_get_nano() - duration;

      /* update timeout_ns */
      if (*timeout_ns > duration)
         *timeout_ns -= duration;
      else
         *timeout_ns = 0;

      if (ret > 0) {
         return VK_SUCCESS;
      } else if (ret == 0) {
         if (!*timeout_ns)
            return VK_TIMEOUT;
      } else if (errno != EINTR && errno != EAGAIN) {
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      }
   }
}
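
/*
 * tu_fence_poll_fds() retries on EINTR/EAGAIN with the remaining time, so
 * an interrupting signal cannot cut the wait short.  Other poll() failures
 * are reported as VK_ERROR_OUT_OF_HOST_MEMORY, for want of a more specific
 * Vulkan error code for a generic host-side failure.
 */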

/**
 * Update a pollfd array and the fence states.  This should be called
 * after a successful call to tu_fence_poll_fds.
 */
static nfds_t
tu_fence_update_fences_and_poll_fds(uint32_t fence_count,
                                    const VkFence *fences,
                                    bool wait_all,
                                    struct pollfd *fds)
{
   uint32_t nfds = 0;
   uint32_t fds_idx = 0;
   for (uint32_t i = 0; i < fence_count; i++) {
      TU_FROM_HANDLE(tu_fence, fence, fences[i]);

      /* skip wsi fences */
      if (fence->fence_wsi)
         continue;

      /* no signaled fence in fds */
      if (fence->signaled)
         continue;

      /* fds[fds_idx] corresponds to fences[i] */
      assert(fence->fd == fds[fds_idx].fd);

      assert(nfds <= fds_idx && fds_idx <= i);

      /* fd is ready (errors are treated as ready) */
      if (fds[fds_idx].revents) {
         tu_fence_set_state(fence, TU_FENCE_STATE_SIGNALED, -1);
      } else if (wait_all) {
         /* add to fds again for another poll */
         fds[nfds].fd = fence->fd;
         fds[nfds].events = POLLIN;
         fds[nfds].revents = 0;
         nfds++;
      }

      fds_idx++;
   }

   return nfds;
}
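
/*
 * The compaction above relies on the asserted invariant
 * nfds <= fds_idx <= i: ready entries are consumed and still-pending ones
 * are packed toward the front of the array in place, so the read index
 * fds_idx never falls behind the write index nfds.  With wait_all == false
 * nothing is re-added and the function returns 0, ending the caller's poll
 * loop as soon as any fence has signaled.
 */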

VkResult
tu_WaitForFences(VkDevice _device,
                 uint32_t fenceCount,
                 const VkFence *pFences,
                 VkBool32 waitAll,
                 uint64_t timeout)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   /* add a simpler path for when fenceCount == 1? */

   struct pollfd stack_fds[8];
   struct pollfd *fds = stack_fds;
   if (fenceCount > ARRAY_SIZE(stack_fds)) {
      fds = vk_alloc(&device->alloc, sizeof(*fds) * fenceCount, 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
      if (!fds)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   /* set up pollfd array and start polling */
   nfds_t nfds = tu_fence_init_poll_fds(fenceCount, pFences, waitAll, fds);
   VkResult result = VK_SUCCESS;
   while (nfds) {
      result = tu_fence_poll_fds(fds, nfds, &timeout);
      if (result != VK_SUCCESS)
         break;
      nfds = tu_fence_update_fences_and_poll_fds(fenceCount, pFences,
                                                 waitAll, fds);
   }

   if (fds != stack_fds)
      vk_free(&device->alloc, fds);

   if (result != VK_SUCCESS)
      return result;

   for (uint32_t i = 0; i < fenceCount; ++i) {
      TU_FROM_HANDLE(tu_fence, fence, pFences[i]);
      if (fence->fence_wsi) {
         VkResult result = fence->fence_wsi->wait(fence->fence_wsi, timeout);
         if (result != VK_SUCCESS)
            return result;
      }
   }

   return result;
}
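
/*
 * The waitAll semantics fall out of the helpers: with waitAll set, fences
 * that signal are dropped from the pollfd array and polling continues
 * until the array is empty; without it, tu_fence_init_poll_fds() returns
 * zero fds when some fence is already signaled, and
 * tu_fence_update_fences_and_poll_fds() never re-adds pending fds, so a
 * single successful poll() ends the loop.
 */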

VkResult
tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
{
   for (unsigned i = 0; i < fenceCount; ++i) {
      TU_FROM_HANDLE(tu_fence, fence, pFences[i]);
      assert(tu_fence_get_state(fence) != TU_FENCE_STATE_PENDING);
      tu_fence_set_state(fence, TU_FENCE_STATE_RESET, -1);
   }

   return VK_SUCCESS;
}

VkResult
tu_GetFenceStatus(VkDevice _device, VkFence _fence)
{
   TU_FROM_HANDLE(tu_fence, fence, _fence);

   if (fence->fd >= 0) {
      /* sync_wait() with a zero timeout is a non-blocking status query;
       * ETIME means the fd has not signaled yet.
       */
      int err = sync_wait(fence->fd, 0);
      if (!err)
         tu_fence_set_state(fence, TU_FENCE_STATE_SIGNALED, -1);
      else if (err && errno != ETIME)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
   }
   if (fence->fence_wsi) {
      VkResult result = fence->fence_wsi->wait(fence->fence_wsi, 0);

      if (result != VK_SUCCESS) {
         if (result == VK_TIMEOUT)
            return VK_NOT_READY;
         return result;
      }
   }

   return fence->signaled ? VK_SUCCESS : VK_NOT_READY;
}