turnip: add display wsi
mesa.git: src/freedreno/vulkan/tu_fence.c
/*
 * Copyright © 2019 Google LLC
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include <errno.h>
#include <fcntl.h>
#include <libsync.h>
#include <poll.h>
#include <unistd.h>

#include "util/os_time.h"

/**
 * Internally, a fence can be in one of these states.
 */
enum tu_fence_state
{
   TU_FENCE_STATE_RESET,
   TU_FENCE_STATE_PENDING,
   TU_FENCE_STATE_SIGNALED,
};
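
/* The states map onto the two fields of struct tu_fence as follows (this is
 * the encoding enforced by tu_fence_set_state below):
 *
 *    RESET:     signaled = false, fd = -1
 *    PENDING:   signaled = false, fd >= 0
 *    SIGNALED:  signaled = true,  fd = -1
 */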

static enum tu_fence_state
tu_fence_get_state(const struct tu_fence *fence)
{
   if (fence->signaled)
      assert(fence->fd < 0);

   if (fence->signaled)
      return TU_FENCE_STATE_SIGNALED;
   else if (fence->fd >= 0)
      return TU_FENCE_STATE_PENDING;
   else
      return TU_FENCE_STATE_RESET;
}

static void
tu_fence_set_state(struct tu_fence *fence, enum tu_fence_state state, int fd)
{
   if (fence->fd >= 0)
      close(fence->fd);

   switch (state) {
   case TU_FENCE_STATE_RESET:
      assert(fd < 0);
      fence->signaled = false;
      fence->fd = -1;
      break;
   case TU_FENCE_STATE_PENDING:
      assert(fd >= 0);
      fence->signaled = false;
      fence->fd = fd;
      break;
   case TU_FENCE_STATE_SIGNALED:
      assert(fd < 0);
      fence->signaled = true;
      fence->fd = -1;
      break;
   default:
      unreachable("unknown fence state");
      break;
   }
}

void
tu_fence_init(struct tu_fence *fence, bool signaled)
{
   fence->signaled = signaled;
   fence->fd = -1;
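   /* attached later by the WSI code, presumably for VK_EXT_display_control
    * style events; this file only ever waits on and destroys it */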
   fence->fence_wsi = NULL;
}

void
tu_fence_finish(struct tu_fence *fence)
{
   if (fence->fd >= 0)
      close(fence->fd);
   if (fence->fence_wsi)
      fence->fence_wsi->destroy(fence->fence_wsi);
}

/**
 * Update the associated fd of a fence. Ownership of \a fd is transferred to
 * \a fence.
 *
 * This function never blocks, and \a fence may be in any state when it is
 * called. For that to be safe, the caller must guarantee that, when both the
 * currently associated fd and the new fd are valid, they are on the same
 * timeline, with the new fd being the later point on that timeline.
 */
void
tu_fence_update_fd(struct tu_fence *fence, int fd)
{
   const enum tu_fence_state state =
      fd >= 0 ? TU_FENCE_STATE_PENDING : TU_FENCE_STATE_SIGNALED;
   tu_fence_set_state(fence, state, fd);
}
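
/* Illustrative usage, not code from this file: a queue submission that
 * received an out-fence fd from the kernel could transfer its ownership with
 *
 *    tu_fence_update_fd(fence, out_fence_fd);
 *
 * moving the fence to the pending state, while a submission with nothing to
 * wait on could call tu_fence_update_fd(fence, -1) to signal the fence
 * immediately.
 */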

/**
 * Make a fence a copy of another fence. \a fence must be in the reset state.
 */
void
tu_fence_copy(struct tu_fence *fence, const struct tu_fence *src)
{
   assert(tu_fence_get_state(fence) == TU_FENCE_STATE_RESET);

   /* dup src->fd */
   int fd = -1;
   if (src->fd >= 0) {
      fd = fcntl(src->fd, F_DUPFD_CLOEXEC, 0);
      if (fd < 0) {
         tu_loge("failed to dup fd %d for fence", src->fd);
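         /* fallback: block until src signals, so that leaving the copy in
          * the signaled state below is correct */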
         sync_wait(src->fd, -1);
      }
   }

   tu_fence_update_fd(fence, fd);
}

/**
 * Signal a fence. \a fence must be in the reset state.
 */
void
tu_fence_signal(struct tu_fence *fence)
{
   assert(tu_fence_get_state(fence) == TU_FENCE_STATE_RESET);
   tu_fence_set_state(fence, TU_FENCE_STATE_SIGNALED, -1);
}

/**
 * Wait until a fence is idle (i.e., not pending).
 */
void
tu_fence_wait_idle(struct tu_fence *fence)
{
   if (fence->fd >= 0) {
      if (sync_wait(fence->fd, -1))
         tu_loge("sync_wait on fence fd %d failed", fence->fd);

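      /* mark the fence signaled even if sync_wait failed; all we promise is
       * that the fence is no longer pending */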
      tu_fence_set_state(fence, TU_FENCE_STATE_SIGNALED, -1);
   }
}

VkResult
tu_CreateFence(VkDevice _device,
               const VkFenceCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkFence *pFence)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_fence *fence =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!fence)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   tu_fence_init(fence, pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT);

   *pFence = tu_fence_to_handle(fence);

   return VK_SUCCESS;
}

void
tu_DestroyFence(VkDevice _device,
                VkFence _fence,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_fence, fence, _fence);

   if (!fence)
      return;

   tu_fence_finish(fence);

   vk_free2(&device->alloc, pAllocator, fence);
}

/**
 * Initialize a pollfd array from fences.
 */
static nfds_t
tu_fence_init_poll_fds(uint32_t fence_count,
                       const VkFence *fences,
                       bool wait_all,
                       struct pollfd *fds)
{
   nfds_t nfds = 0;
   for (uint32_t i = 0; i < fence_count; i++) {
      TU_FROM_HANDLE(tu_fence, fence, fences[i]);

      if (fence->signaled) {
         if (wait_all) {
            /* skip signaled fences */
            continue;
         } else {
            /* no need to poll any fd */
            nfds = 0;
            break;
         }
      }

      /* negative fds are never ready, which is the desired behavior */
      fds[nfds].fd = fence->fd;
      fds[nfds].events = POLLIN;
      fds[nfds].revents = 0;
      nfds++;
   }

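   /* returning 0 tells the caller there is nothing to wait on: with
    * wait_all, every fence is already signaled; without it, at least one is */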
   return nfds;
}

/**
 * Translate timeout from nanoseconds to milliseconds for poll().
 */
static int
tu_fence_get_poll_timeout(uint64_t timeout_ns)
{
   const uint64_t ns_per_ms = 1000 * 1000;
   uint64_t timeout_ms = timeout_ns / ns_per_ms;

   /* round to the nearest millisecond: e.g., 1400000ns becomes 1ms while
    * 1500000ns becomes 2ms */
   if (timeout_ns - timeout_ms * ns_per_ms >= ns_per_ms / 2)
      timeout_ms++;

   return timeout_ms < INT_MAX ? timeout_ms : INT_MAX;
}

/**
 * Poll a pollfd array.
 */
static VkResult
tu_fence_poll_fds(struct pollfd *fds, nfds_t nfds, uint64_t *timeout_ns)
{
   while (true) {
      /* poll */
      uint64_t duration = os_time_get_nano();
      int ret = poll(fds, nfds, tu_fence_get_poll_timeout(*timeout_ns));
      duration = os_time_get_nano() - duration;

      /* charge the time spent polling against the remaining timeout */
      if (*timeout_ns > duration)
         *timeout_ns -= duration;
      else
         *timeout_ns = 0;

      if (ret > 0) {
         return VK_SUCCESS;
      } else if (ret == 0) {
         if (!*timeout_ns)
            return VK_TIMEOUT;
      } else if (errno != EINTR && errno != EAGAIN) {
         /* a real poll() failure, not just an interrupted call */
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      }
   }
}

/**
 * Update a pollfd array and the fence states. This should be called after a
 * successful call to tu_fence_poll_fds.
 */
static nfds_t
tu_fence_update_fences_and_poll_fds(uint32_t fence_count,
                                    const VkFence *fences,
                                    bool wait_all,
                                    struct pollfd *fds)
{
   uint32_t nfds = 0;
   uint32_t fds_idx = 0;
   for (uint32_t i = 0; i < fence_count; i++) {
      TU_FROM_HANDLE(tu_fence, fence, fences[i]);

      /* no signaled fence in fds */
      if (fence->signaled)
         continue;

      /* fds[fds_idx] corresponds to fences[i] */
      assert(fence->fd == fds[fds_idx].fd);

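      /* still-pending fds are compacted in place toward the front of fds */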
      assert(nfds <= fds_idx && fds_idx <= i);

      /* fd is ready (errors are treated as ready) */
      if (fds[fds_idx].revents) {
         tu_fence_set_state(fence, TU_FENCE_STATE_SIGNALED, -1);
      } else if (wait_all) {
         /* add to fds again for another poll */
         fds[nfds].fd = fence->fd;
         fds[nfds].events = POLLIN;
         fds[nfds].revents = 0;
         nfds++;
      }

      fds_idx++;
   }

   return nfds;
}

VkResult
tu_WaitForFences(VkDevice _device,
                 uint32_t fenceCount,
                 const VkFence *pFences,
                 VkBool32 waitAll,
                 uint64_t timeout)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   /* add a simpler path for when fenceCount == 1? */

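   /* poll from the stack for small fence counts to avoid an allocation */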
   struct pollfd stack_fds[8];
   struct pollfd *fds = stack_fds;
   if (fenceCount > ARRAY_SIZE(stack_fds)) {
      fds = vk_alloc(&device->alloc, sizeof(*fds) * fenceCount, 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
      if (!fds)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   /* set up pollfd array and start polling */
   nfds_t nfds = tu_fence_init_poll_fds(fenceCount, pFences, waitAll, fds);
   VkResult result = VK_SUCCESS;
   while (nfds) {
      result = tu_fence_poll_fds(fds, nfds, &timeout);
      if (result != VK_SUCCESS)
         break;
      nfds = tu_fence_update_fences_and_poll_fds(fenceCount, pFences, waitAll,
                                                 fds);
   }

   if (fds != stack_fds)
      vk_free(&device->alloc, fds);

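   /* also wait on any WSI fences; tu_fence_poll_fds has already deducted
    * the time spent polling from timeout */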
   for (uint32_t i = 0; i < fenceCount; ++i) {
      TU_FROM_HANDLE(tu_fence, fence, pFences[i]);
      if (fence->fence_wsi) {
         VkResult result = fence->fence_wsi->wait(fence->fence_wsi, timeout);
         if (result != VK_SUCCESS)
            return result;
      }
   }

   return result;
}

VkResult
tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
{
   for (unsigned i = 0; i < fenceCount; ++i) {
      TU_FROM_HANDLE(tu_fence, fence, pFences[i]);
      assert(tu_fence_get_state(fence) != TU_FENCE_STATE_PENDING);
      tu_fence_set_state(fence, TU_FENCE_STATE_RESET, -1);
   }

   return VK_SUCCESS;
}

VkResult
tu_GetFenceStatus(VkDevice _device, VkFence _fence)
{
   TU_FROM_HANDLE(tu_fence, fence, _fence);

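   /* a zero-timeout sync_wait only probes the fd; ETIME means the fence is
    * still pending */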
   if (fence->fd >= 0) {
      int err = sync_wait(fence->fd, 0);
      if (!err)
         tu_fence_set_state(fence, TU_FENCE_STATE_SIGNALED, -1);
      else if (err && errno != ETIME)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
   }
   if (fence->fence_wsi) {
      VkResult result = fence->fence_wsi->wait(fence->fence_wsi, 0);

      if (result != VK_SUCCESS) {
         if (result == VK_TIMEOUT)
            return VK_NOT_READY;
         return result;
      }
   }

   return fence->signaled ? VK_SUCCESS : VK_NOT_READY;
}