tree-wide: replace MAYBE_UNUSED with ASSERTED
mesa.git: src/intel/vulkan/anv_queue.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * This file implements VkQueue, VkFence, and VkSemaphore
 */

#include <fcntl.h>
#include <unistd.h>
#include <sys/eventfd.h>

#include "anv_private.h"
#include "vk_util.h"

#include "genxml/gen7_pack.h"

VkResult
anv_device_execbuf(struct anv_device *device,
                   struct drm_i915_gem_execbuffer2 *execbuf,
                   struct anv_bo **execbuf_bos)
{
   int ret = device->no_hw ? 0 : anv_gem_execbuffer(device, execbuf);
   if (ret != 0) {
      /* We don't know the real error. */
      return anv_device_set_lost(device, "execbuf2 failed: %m");
   }

   struct drm_i915_gem_exec_object2 *objects =
      (void *)(uintptr_t)execbuf->buffers_ptr;
   for (uint32_t k = 0; k < execbuf->buffer_count; k++) {
      if (execbuf_bos[k]->flags & EXEC_OBJECT_PINNED)
         assert(execbuf_bos[k]->offset == objects[k].offset);
      execbuf_bos[k]->offset = objects[k].offset;
   }

   return VK_SUCCESS;
}

VkResult
anv_device_submit_simple_batch(struct anv_device *device,
                               struct anv_batch *batch)
{
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   struct anv_bo bo, *exec_bos[1];
   VkResult result = VK_SUCCESS;
   uint32_t size;

   /* Kernel driver requires 8 byte aligned batch length */
   size = align_u32(batch->next - batch->start, 8);
   result = anv_bo_pool_alloc(&device->batch_bo_pool, &bo, size);
   if (result != VK_SUCCESS)
      return result;

   memcpy(bo.map, batch->start, size);
   if (!device->info.has_llc)
      gen_flush_range(bo.map, size);

   exec_bos[0] = &bo;
   exec2_objects[0].handle = bo.gem_handle;
   exec2_objects[0].relocation_count = 0;
   exec2_objects[0].relocs_ptr = 0;
   exec2_objects[0].alignment = 0;
   exec2_objects[0].offset = bo.offset;
   exec2_objects[0].flags = bo.flags;
   exec2_objects[0].rsvd1 = 0;
   exec2_objects[0].rsvd2 = 0;

   execbuf.buffers_ptr = (uintptr_t) exec2_objects;
   execbuf.buffer_count = 1;
   execbuf.batch_start_offset = 0;
   execbuf.batch_len = size;
   execbuf.cliprects_ptr = 0;
   execbuf.num_cliprects = 0;
   execbuf.DR1 = 0;
   execbuf.DR4 = 0;

   execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   execbuf.rsvd1 = device->context_id;
   execbuf.rsvd2 = 0;

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
      gen_print_batch(&device->decoder_ctx, bo.map, bo.size, bo.offset, false);

   result = anv_device_execbuf(device, &execbuf, exec_bos);
   if (result != VK_SUCCESS)
      goto fail;

   result = anv_device_wait(device, &bo, INT64_MAX);

 fail:
   anv_bo_pool_free(&device->batch_bo_pool, &bo);

   return result;
}

VkResult anv_QueueSubmit(
    VkQueue                                     _queue,
    uint32_t                                    submitCount,
    const VkSubmitInfo*                         pSubmits,
    VkFence                                     fence)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);
   struct anv_device *device = queue->device;

   /* Query for device status prior to submitting. Technically, we don't need
    * to do this. However, if we have a client that's submitting piles of
    * garbage, we would rather break as early as possible to keep GPU hangs
    * contained. If we don't check here, we'll either be waiting for the
    * kernel to kick us or we'll have to wait until the client waits on a
    * fence before we actually know whether or not we've hung.
    */
   VkResult result = anv_device_query_status(device);
   if (result != VK_SUCCESS)
      return result;

   /* We lock around QueueSubmit for three main reasons:
    *
    * 1) When a block pool is resized, we create a new gem handle with a
    *    different size and, in the case of surface states, possibly a
    *    different center offset but we re-use the same anv_bo struct when
    *    we do so. If this happens in the middle of setting up an execbuf,
    *    we could end up with our list of BOs out of sync with our list of
    *    gem handles.
    *
    * 2) The algorithm we use for building the list of unique buffers isn't
    *    thread-safe. While the client is supposed to synchronize around
    *    QueueSubmit, this would be extremely difficult to debug if it ever
    *    came up in the wild due to a broken app. It's better to play it
    *    safe and just lock around QueueSubmit.
    *
    * 3) The anv_cmd_buffer_execbuf function may perform relocations in
    *    userspace. Due to the fact that the surface state buffer is shared
    *    between batches, we can't afford to have that happen from multiple
    *    threads at the same time. Even though the user is supposed to
    *    ensure this doesn't happen, we play it safe as in (2) above.
    *
    * Since the only other things that ever take the device lock, such as
    * block pool resizes, happen only rarely, the lock is almost never
    * contended, so taking it isn't really an expensive operation in this
    * case.
    */
   pthread_mutex_lock(&device->mutex);

   if (fence && submitCount == 0) {
      /* If we don't have any command buffers, we need to submit a dummy
       * batch to give GEM something to wait on. We could, potentially,
       * come up with something more efficient but this shouldn't be a
       * common case.
       */
      result = anv_cmd_buffer_execbuf(device, NULL, NULL, 0, NULL, 0, fence);
      goto out;
   }

   for (uint32_t i = 0; i < submitCount; i++) {
      /* Fence for this submit. NULL for all but the last one */
      VkFence submit_fence = (i == submitCount - 1) ? fence : VK_NULL_HANDLE;

      if (pSubmits[i].commandBufferCount == 0) {
         /* If we don't have any command buffers, we need to submit a dummy
          * batch to give GEM something to wait on. We could, potentially,
          * come up with something more efficient but this shouldn't be a
          * common case.
          */
         result = anv_cmd_buffer_execbuf(device, NULL,
                                         pSubmits[i].pWaitSemaphores,
                                         pSubmits[i].waitSemaphoreCount,
                                         pSubmits[i].pSignalSemaphores,
                                         pSubmits[i].signalSemaphoreCount,
                                         submit_fence);
         if (result != VK_SUCCESS)
            goto out;

         continue;
      }

      for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
         ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer,
                         pSubmits[i].pCommandBuffers[j]);
         assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
         assert(!anv_batch_has_error(&cmd_buffer->batch));

         /* Fence for this execbuf. NULL for all but the last one */
         VkFence execbuf_fence =
            (j == pSubmits[i].commandBufferCount - 1) ?
            submit_fence : VK_NULL_HANDLE;

         const VkSemaphore *in_semaphores = NULL, *out_semaphores = NULL;
         uint32_t num_in_semaphores = 0, num_out_semaphores = 0;
         if (j == 0) {
            /* Only the first batch gets the in semaphores */
            in_semaphores = pSubmits[i].pWaitSemaphores;
            num_in_semaphores = pSubmits[i].waitSemaphoreCount;
         }

         if (j == pSubmits[i].commandBufferCount - 1) {
            /* Only the last batch gets the out semaphores */
            out_semaphores = pSubmits[i].pSignalSemaphores;
            num_out_semaphores = pSubmits[i].signalSemaphoreCount;
         }

         result = anv_cmd_buffer_execbuf(device, cmd_buffer,
                                         in_semaphores, num_in_semaphores,
                                         out_semaphores, num_out_semaphores,
                                         execbuf_fence);
         if (result != VK_SUCCESS)
            goto out;
      }
   }

   pthread_cond_broadcast(&device->queue_submit);

 out:
   if (result != VK_SUCCESS) {
      /* In the case that something has gone wrong we may end up with an
       * inconsistent state from which it may not be trivial to recover.
       * For example, we might have computed address relocations and
       * any future attempt to re-submit this job will need to know about
       * this and avoid computing relocation addresses again.
       *
       * To avoid this sort of issue, we assume that if something went
       * wrong during submission we must already be in a really bad
       * situation anyway (such as being out of memory) and return
       * VK_ERROR_DEVICE_LOST to ensure that clients do not attempt to
       * submit the same job again to this device.
       */
      result = anv_device_set_lost(device, "vkQueueSubmit() failed");
   }

   pthread_mutex_unlock(&device->mutex);

   return result;
}

VkResult anv_QueueWaitIdle(
    VkQueue                                     _queue)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);

   return anv_DeviceWaitIdle(anv_device_to_handle(queue->device));
}

VkResult anv_CreateFence(
    VkDevice                                    _device,
    const VkFenceCreateInfo*                    pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkFence*                                    pFence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_fence *fence;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);

   fence = vk_zalloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (fence == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (device->instance->physicalDevice.has_syncobj_wait) {
      fence->permanent.type = ANV_FENCE_TYPE_SYNCOBJ;

      uint32_t create_flags = 0;
      if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT)
         create_flags |= DRM_SYNCOBJ_CREATE_SIGNALED;

      fence->permanent.syncobj = anv_gem_syncobj_create(device, create_flags);
      if (!fence->permanent.syncobj)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   } else {
      fence->permanent.type = ANV_FENCE_TYPE_BO;

      VkResult result = anv_bo_pool_alloc(&device->batch_bo_pool,
                                          &fence->permanent.bo.bo, 4096);
      if (result != VK_SUCCESS)
         return result;

      if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
         fence->permanent.bo.state = ANV_BO_FENCE_STATE_SIGNALED;
      } else {
         fence->permanent.bo.state = ANV_BO_FENCE_STATE_RESET;
      }
   }

   *pFence = anv_fence_to_handle(fence);

   return VK_SUCCESS;
}

static void
anv_fence_impl_cleanup(struct anv_device *device,
                       struct anv_fence_impl *impl)
{
   switch (impl->type) {
   case ANV_FENCE_TYPE_NONE:
      /* Dummy. Nothing to do */
      break;

   case ANV_FENCE_TYPE_BO:
      anv_bo_pool_free(&device->batch_bo_pool, &impl->bo.bo);
      break;

   case ANV_FENCE_TYPE_SYNCOBJ:
      anv_gem_syncobj_destroy(device, impl->syncobj);
      break;

   case ANV_FENCE_TYPE_WSI:
      impl->fence_wsi->destroy(impl->fence_wsi);
      break;

   default:
      unreachable("Invalid fence type");
   }

   impl->type = ANV_FENCE_TYPE_NONE;
}

void anv_DestroyFence(
    VkDevice                                    _device,
    VkFence                                     _fence,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (!fence)
      return;

   anv_fence_impl_cleanup(device, &fence->temporary);
   anv_fence_impl_cleanup(device, &fence->permanent);

   vk_free2(&device->alloc, pAllocator, fence);
}

VkResult anv_ResetFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);

      /* From the Vulkan 1.0.53 spec:
       *
       *    "If any member of pFences currently has its payload imported with
       *    temporary permanence, that fence’s prior permanent payload is
       *    first restored. The remaining operations described therefore
       *    operate on the restored payload."
       */
      if (fence->temporary.type != ANV_FENCE_TYPE_NONE)
         anv_fence_impl_cleanup(device, &fence->temporary);

      struct anv_fence_impl *impl = &fence->permanent;

      switch (impl->type) {
      case ANV_FENCE_TYPE_BO:
         impl->bo.state = ANV_BO_FENCE_STATE_RESET;
         break;

      case ANV_FENCE_TYPE_SYNCOBJ:
         anv_gem_syncobj_reset(device, impl->syncobj);
         break;

      default:
         unreachable("Invalid fence type");
      }
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceStatus(
    VkDevice                                    _device,
    VkFence                                     _fence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   struct anv_fence_impl *impl =
      fence->temporary.type != ANV_FENCE_TYPE_NONE ?
      &fence->temporary : &fence->permanent;

   switch (impl->type) {
   case ANV_FENCE_TYPE_BO:
      /* BO fences don't support import/export */
      assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
      switch (impl->bo.state) {
      case ANV_BO_FENCE_STATE_RESET:
         /* If it hasn't even been sent off to the GPU yet, it's not ready */
         return VK_NOT_READY;

      case ANV_BO_FENCE_STATE_SIGNALED:
         /* It's been signaled, return success */
         return VK_SUCCESS;

      case ANV_BO_FENCE_STATE_SUBMITTED: {
         VkResult result = anv_device_bo_busy(device, &impl->bo.bo);
         if (result == VK_SUCCESS) {
            impl->bo.state = ANV_BO_FENCE_STATE_SIGNALED;
            return VK_SUCCESS;
         } else {
            return result;
         }
      }
      default:
         unreachable("Invalid fence status");
      }

   case ANV_FENCE_TYPE_SYNCOBJ: {
      int ret = anv_gem_syncobj_wait(device, &impl->syncobj, 1, 0, true);
      if (ret == -1) {
         if (errno == ETIME) {
            return VK_NOT_READY;
         } else {
            /* We don't know the real error. */
            return anv_device_set_lost(device, "drm_syncobj_wait failed: %m");
         }
      } else {
         return VK_SUCCESS;
      }
   }

   default:
      unreachable("Invalid fence type");
   }
}

#define NSEC_PER_SEC 1000000000
#define INT_TYPE_MAX(type) ((1ull << (sizeof(type) * 8 - 1)) - 1)
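/* For example, INT_TYPE_MAX(int64_t) evaluates to INT64_MAX: the shift
 * yields the sign bit of a signed type of that size, and subtracting one
 * gives the largest representable signed value.
 */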

static uint64_t
gettime_ns(void)
{
   struct timespec current;
   clock_gettime(CLOCK_MONOTONIC, &current);
   return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
}
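/* CLOCK_MONOTONIC is used rather than CLOCK_REALTIME so that the timeouts
 * computed below are immune to wall-clock adjustments (NTP, settimeofday).
 */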

static uint64_t anv_get_absolute_timeout(uint64_t timeout)
{
   if (timeout == 0)
      return 0;
   uint64_t current_time = gettime_ns();
   uint64_t max_timeout = (uint64_t) INT64_MAX - current_time;

   timeout = MIN2(max_timeout, timeout);

   return (current_time + timeout);
}
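/* A worked example with hypothetical numbers: if current_time is 10^18 ns
 * and the client passes timeout == UINT64_MAX, then max_timeout is
 * INT64_MAX - 10^18, so the returned deadline saturates at INT64_MAX
 * instead of wrapping around zero.
 */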

static int64_t anv_get_relative_timeout(uint64_t abs_timeout)
{
   uint64_t now = gettime_ns();

   /* We don't want negative timeouts.
    *
    * DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is
    * supposed to block indefinitely for timeouts < 0. Unfortunately,
    * this was broken for a couple of kernel releases. Since there's
    * no way to know whether or not the kernel we're using is one of
    * the broken ones, the best we can do is to clamp the timeout to
    * INT64_MAX. This limits the maximum timeout from 584 years to
    * 292 years - likely not a big deal.
    */
   if (abs_timeout < now)
      return 0;

   uint64_t rel_timeout = abs_timeout - now;
   if (rel_timeout > (uint64_t) INT64_MAX)
      rel_timeout = INT64_MAX;

   return rel_timeout;
}
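/* The year figures in the comment above follow directly from the
 * arithmetic: 2^64 ns is about 1.8e19 ns, roughly 584 years, and clamping
 * to INT64_MAX (2^63 ns) halves that to roughly 292 years.
 */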

static VkResult
anv_wait_for_syncobj_fences(struct anv_device *device,
                            uint32_t fenceCount,
                            const VkFence *pFences,
                            bool waitAll,
                            uint64_t abs_timeout_ns)
{
   uint32_t *syncobjs = vk_zalloc(&device->alloc,
                                  sizeof(*syncobjs) * fenceCount, 8,
                                  VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!syncobjs)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      assert(fence->permanent.type == ANV_FENCE_TYPE_SYNCOBJ);

      struct anv_fence_impl *impl =
         fence->temporary.type != ANV_FENCE_TYPE_NONE ?
         &fence->temporary : &fence->permanent;

      assert(impl->type == ANV_FENCE_TYPE_SYNCOBJ);
      syncobjs[i] = impl->syncobj;
   }

   /* The gem_syncobj_wait ioctl may return early due to an inherent
    * limitation in the way it computes timeouts. Loop until we've actually
    * passed the timeout.
    */
   int ret;
   do {
      ret = anv_gem_syncobj_wait(device, syncobjs, fenceCount,
                                 abs_timeout_ns, waitAll);
   } while (ret == -1 && errno == ETIME && gettime_ns() < abs_timeout_ns);

   vk_free(&device->alloc, syncobjs);

   if (ret == -1) {
      if (errno == ETIME) {
         return VK_TIMEOUT;
      } else {
         /* We don't know the real error. */
         return anv_device_set_lost(device, "drm_syncobj_wait failed: %m");
      }
   } else {
      return VK_SUCCESS;
   }
}

static VkResult
anv_wait_for_bo_fences(struct anv_device *device,
                       uint32_t fenceCount,
                       const VkFence *pFences,
                       bool waitAll,
                       uint64_t abs_timeout_ns)
{
   VkResult result = VK_SUCCESS;
   uint32_t pending_fences = fenceCount;
   while (pending_fences) {
      pending_fences = 0;
      bool signaled_fences = false;
      for (uint32_t i = 0; i < fenceCount; i++) {
         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);

         /* This function assumes that all fences are BO fences and that they
          * have no temporary state. Since BO fences will never be exported,
          * this should be a safe assumption.
          */
         assert(fence->permanent.type == ANV_FENCE_TYPE_BO);
         assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
         struct anv_fence_impl *impl = &fence->permanent;

         switch (impl->bo.state) {
         case ANV_BO_FENCE_STATE_RESET:
            /* This fence hasn't been submitted yet, we'll catch it the next
             * time around. Yes, this may mean we dead-loop but, short of
             * lots of locking and a condition variable, there's not much
             * that we can do about that.
             */
            pending_fences++;
            continue;

         case ANV_BO_FENCE_STATE_SIGNALED:
            /* This fence is not pending. If waitAll isn't set, we can return
             * early. Otherwise, we have to keep going.
             */
            if (!waitAll) {
               result = VK_SUCCESS;
               goto done;
            }
            continue;

         case ANV_BO_FENCE_STATE_SUBMITTED:
            /* These are the fences we really care about. Go ahead and wait
             * on them until we hit a timeout.
             */
            result = anv_device_wait(device, &impl->bo.bo,
                                     anv_get_relative_timeout(abs_timeout_ns));
            switch (result) {
            case VK_SUCCESS:
               impl->bo.state = ANV_BO_FENCE_STATE_SIGNALED;
               signaled_fences = true;
               if (!waitAll)
                  goto done;
               break;

            case VK_TIMEOUT:
               goto done;

            default:
               return result;
            }
         }
      }

      if (pending_fences && !signaled_fences) {
         /* If we've hit this then someone decided to vkWaitForFences before
          * they've actually submitted any of them to a queue. This is a
          * fairly pessimal case, so it's ok to lock here and use a standard
          * pthreads condition variable.
          */
         pthread_mutex_lock(&device->mutex);

         /* It's possible that some of the fences have changed state since
          * the last time we checked. Now that we have the lock, check for
          * pending fences again and don't wait if it's changed.
          */
         uint32_t now_pending_fences = 0;
         for (uint32_t i = 0; i < fenceCount; i++) {
            ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
            if (fence->permanent.bo.state == ANV_BO_FENCE_STATE_RESET)
               now_pending_fences++;
         }
         assert(now_pending_fences <= pending_fences);

         if (now_pending_fences == pending_fences) {
            struct timespec abstime = {
               .tv_sec = abs_timeout_ns / NSEC_PER_SEC,
               .tv_nsec = abs_timeout_ns % NSEC_PER_SEC,
            };

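            /* pthread_cond_timedwait() interprets abstime against the clock
             * the condition variable was created with; this relies on
             * device->queue_submit having been initialized with a
             * CLOCK_MONOTONIC condattr so that it matches gettime_ns().
             */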
            ASSERTED int ret;
            ret = pthread_cond_timedwait(&device->queue_submit,
                                         &device->mutex, &abstime);
            assert(ret != EINVAL);
            if (gettime_ns() >= abs_timeout_ns) {
               pthread_mutex_unlock(&device->mutex);
               result = VK_TIMEOUT;
               goto done;
            }
         }

         pthread_mutex_unlock(&device->mutex);
      }
   }

 done:
   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   return result;
}

static VkResult
anv_wait_for_wsi_fence(struct anv_device *device,
                       const VkFence _fence,
                       uint64_t abs_timeout)
{
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   struct anv_fence_impl *impl = &fence->permanent;

   return impl->fence_wsi->wait(impl->fence_wsi, abs_timeout);
}

static VkResult
anv_wait_for_fences(struct anv_device *device,
                    uint32_t fenceCount,
                    const VkFence *pFences,
                    bool waitAll,
                    uint64_t abs_timeout)
{
   VkResult result = VK_SUCCESS;

   if (fenceCount <= 1 || waitAll) {
      for (uint32_t i = 0; i < fenceCount; i++) {
         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
         switch (fence->permanent.type) {
         case ANV_FENCE_TYPE_BO:
            result = anv_wait_for_bo_fences(device, 1, &pFences[i],
                                            true, abs_timeout);
            break;
         case ANV_FENCE_TYPE_SYNCOBJ:
            result = anv_wait_for_syncobj_fences(device, 1, &pFences[i],
                                                 true, abs_timeout);
            break;
         case ANV_FENCE_TYPE_WSI:
            result = anv_wait_for_wsi_fence(device, pFences[i], abs_timeout);
            break;
         case ANV_FENCE_TYPE_NONE:
            result = VK_SUCCESS;
            break;
         }
         if (result != VK_SUCCESS)
            return result;
      }
   } else {
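      /* "Wait any" across heterogeneous fence types: poll each fence with a
       * zero timeout and spin until one of them signals or the deadline
       * passes.
       */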
      do {
         for (uint32_t i = 0; i < fenceCount; i++) {
            if (anv_wait_for_fences(device, 1, &pFences[i], true, 0) == VK_SUCCESS)
               return VK_SUCCESS;
         }
      } while (gettime_ns() < abs_timeout);
      result = VK_TIMEOUT;
   }
   return result;
}

static bool anv_all_fences_syncobj(uint32_t fenceCount, const VkFence *pFences)
{
   for (uint32_t i = 0; i < fenceCount; ++i) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      if (fence->permanent.type != ANV_FENCE_TYPE_SYNCOBJ)
         return false;
   }
   return true;
}

static bool anv_all_fences_bo(uint32_t fenceCount, const VkFence *pFences)
{
   for (uint32_t i = 0; i < fenceCount; ++i) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      if (fence->permanent.type != ANV_FENCE_TYPE_BO)
         return false;
   }
   return true;
}

VkResult anv_WaitForFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences,
    VkBool32                                    waitAll,
    uint64_t                                    timeout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   uint64_t abs_timeout = anv_get_absolute_timeout(timeout);
   if (anv_all_fences_syncobj(fenceCount, pFences)) {
      return anv_wait_for_syncobj_fences(device, fenceCount, pFences,
                                         waitAll, abs_timeout);
   } else if (anv_all_fences_bo(fenceCount, pFences)) {
      return anv_wait_for_bo_fences(device, fenceCount, pFences,
                                    waitAll, abs_timeout);
   } else {
      return anv_wait_for_fences(device, fenceCount, pFences,
                                 waitAll, abs_timeout);
   }
}

void anv_GetPhysicalDeviceExternalFenceProperties(
    VkPhysicalDevice                            physicalDevice,
    const VkPhysicalDeviceExternalFenceInfo*    pExternalFenceInfo,
    VkExternalFenceProperties*                  pExternalFenceProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   switch (pExternalFenceInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
      if (device->has_syncobj_wait) {
         pExternalFenceProperties->exportFromImportedHandleTypes =
            VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT |
            VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalFenceProperties->compatibleHandleTypes =
            VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT |
            VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalFenceProperties->externalFenceFeatures =
            VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT;
         return;
      }
      break;

   default:
      break;
   }

   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

VkResult anv_ImportFenceFdKHR(
    VkDevice                                    _device,
    const VkImportFenceFdInfoKHR*               pImportFenceFdInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, pImportFenceFdInfo->fence);
   int fd = pImportFenceFdInfo->fd;

   assert(pImportFenceFdInfo->sType ==
          VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR);

   struct anv_fence_impl new_impl = {
      .type = ANV_FENCE_TYPE_NONE,
   };

   switch (pImportFenceFdInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
      new_impl.type = ANV_FENCE_TYPE_SYNCOBJ;

      new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
      if (!new_impl.syncobj)
         return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);

      break;

   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
      /* Sync files are a bit tricky. Because we want to continue using the
       * syncobj implementation of WaitForFences, we don't use the sync file
       * directly but instead import it into a syncobj.
       */
      new_impl.type = ANV_FENCE_TYPE_SYNCOBJ;

      new_impl.syncobj = anv_gem_syncobj_create(device, 0);
      if (!new_impl.syncobj)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      if (anv_gem_syncobj_import_sync_file(device, new_impl.syncobj, fd)) {
         anv_gem_syncobj_destroy(device, new_impl.syncobj);
         return vk_errorf(device->instance, NULL,
                          VK_ERROR_INVALID_EXTERNAL_HANDLE,
                          "syncobj sync file import failed: %m");
      }
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Importing a fence payload from a file descriptor transfers
    *    ownership of the file descriptor from the application to the
    *    Vulkan implementation. The application must not perform any
    *    operations on the file descriptor after a successful import."
    *
    * If the import fails, we leave the file descriptor open.
    */
   close(fd);

   if (pImportFenceFdInfo->flags & VK_FENCE_IMPORT_TEMPORARY_BIT) {
      anv_fence_impl_cleanup(device, &fence->temporary);
      fence->temporary = new_impl;
   } else {
      anv_fence_impl_cleanup(device, &fence->permanent);
      fence->permanent = new_impl;
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceFdKHR(
    VkDevice                                    _device,
    const VkFenceGetFdInfoKHR*                  pGetFdInfo,
    int*                                        pFd)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, pGetFdInfo->fence);

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR);

   struct anv_fence_impl *impl =
      fence->temporary.type != ANV_FENCE_TYPE_NONE ?
      &fence->temporary : &fence->permanent;

   assert(impl->type == ANV_FENCE_TYPE_SYNCOBJ);
   switch (pGetFdInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT: {
      int fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = fd;
      break;
   }

   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT: {
      int fd = anv_gem_syncobj_export_sync_file(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = fd;
      break;
   }

   default:
      unreachable("Invalid fence export handle type");
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Export operations have the same transference as the specified handle
    *    type’s import operations. [...] If the fence was using a
    *    temporarily imported payload, the fence’s prior permanent payload
    *    will be restored."
    */
   if (impl == &fence->temporary)
      anv_fence_impl_cleanup(device, impl);

   return VK_SUCCESS;
}

// Queue semaphore functions

VkResult anv_CreateSemaphore(
    VkDevice                                    _device,
    const VkSemaphoreCreateInfo*                pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkSemaphore*                                pSemaphore)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_semaphore *semaphore;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO);

   semaphore = vk_alloc2(&device->alloc, pAllocator, sizeof(*semaphore), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (semaphore == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   const VkExportSemaphoreCreateInfo *export =
      vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO);
   VkExternalSemaphoreHandleTypeFlags handleTypes =
      export ? export->handleTypes : 0;

   if (handleTypes == 0) {
      /* The DRM execbuffer ioctl always executes in order, so long as you
       * stay on the same ring. Since we don't expose the blit engine as a
       * DMA queue, a dummy no-op semaphore is a perfectly valid
       * implementation.
       */
      semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DUMMY;
   } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) {
      assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT);
      if (device->instance->physicalDevice.has_syncobj) {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
         semaphore->permanent.syncobj = anv_gem_syncobj_create(device, 0);
         if (!semaphore->permanent.syncobj) {
            vk_free2(&device->alloc, pAllocator, semaphore);
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         }
      } else {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_BO;
         VkResult result = anv_bo_cache_alloc(device, &device->bo_cache,
                                              4096, ANV_BO_EXTERNAL,
                                              &semaphore->permanent.bo);
         if (result != VK_SUCCESS) {
            vk_free2(&device->alloc, pAllocator, semaphore);
            return result;
         }

         /* If we're going to use this as a fence, we need to *not* have the
          * EXEC_OBJECT_ASYNC bit set.
          */
         assert(!(semaphore->permanent.bo->flags & EXEC_OBJECT_ASYNC));
      }
   } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
      assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT);
      if (device->instance->physicalDevice.has_syncobj) {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
         semaphore->permanent.syncobj = anv_gem_syncobj_create(device, 0);
      } else {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_SYNC_FILE;
         semaphore->permanent.fd = -1;
      }
   } else {
      assert(!"Unknown handle type");
      vk_free2(&device->alloc, pAllocator, semaphore);
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   semaphore->temporary.type = ANV_SEMAPHORE_TYPE_NONE;

   *pSemaphore = anv_semaphore_to_handle(semaphore);

   return VK_SUCCESS;
}

static void
anv_semaphore_impl_cleanup(struct anv_device *device,
                           struct anv_semaphore_impl *impl)
{
   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_NONE:
   case ANV_SEMAPHORE_TYPE_DUMMY:
      /* Dummy. Nothing to do */
      break;

   case ANV_SEMAPHORE_TYPE_BO:
      anv_bo_cache_release(device, &device->bo_cache, impl->bo);
      break;

   case ANV_SEMAPHORE_TYPE_SYNC_FILE:
      close(impl->fd);
      break;

   case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
      anv_gem_syncobj_destroy(device, impl->syncobj);
      break;

   default:
      unreachable("Invalid semaphore type");
   }

   impl->type = ANV_SEMAPHORE_TYPE_NONE;
}

void
anv_semaphore_reset_temporary(struct anv_device *device,
                              struct anv_semaphore *semaphore)
{
   if (semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE)
      return;

   anv_semaphore_impl_cleanup(device, &semaphore->temporary);
}

void anv_DestroySemaphore(
    VkDevice                                    _device,
    VkSemaphore                                 _semaphore,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, _semaphore);

   if (semaphore == NULL)
      return;

   anv_semaphore_impl_cleanup(device, &semaphore->temporary);
   anv_semaphore_impl_cleanup(device, &semaphore->permanent);

   vk_free2(&device->alloc, pAllocator, semaphore);
}

void anv_GetPhysicalDeviceExternalSemaphoreProperties(
    VkPhysicalDevice                            physicalDevice,
    const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
    VkExternalSemaphoreProperties*              pExternalSemaphoreProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   switch (pExternalSemaphoreInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
      pExternalSemaphoreProperties->exportFromImportedHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
      pExternalSemaphoreProperties->compatibleHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
      pExternalSemaphoreProperties->externalSemaphoreFeatures =
         VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
         VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
      return;

   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
      if (device->has_exec_fence) {
         pExternalSemaphoreProperties->exportFromImportedHandleTypes =
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalSemaphoreProperties->compatibleHandleTypes =
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalSemaphoreProperties->externalSemaphoreFeatures =
            VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
         return;
      }
      break;

   default:
      break;
   }

   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

VkResult anv_ImportSemaphoreFdKHR(
    VkDevice                                    _device,
    const VkImportSemaphoreFdInfoKHR*           pImportSemaphoreFdInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pImportSemaphoreFdInfo->semaphore);
   int fd = pImportSemaphoreFdInfo->fd;

   struct anv_semaphore_impl new_impl = {
      .type = ANV_SEMAPHORE_TYPE_NONE,
   };

   switch (pImportSemaphoreFdInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
      if (device->instance->physicalDevice.has_syncobj) {
         new_impl.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;

         new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
         if (!new_impl.syncobj)
            return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
      } else {
         new_impl.type = ANV_SEMAPHORE_TYPE_BO;

         VkResult result = anv_bo_cache_import(device, &device->bo_cache,
                                               fd, ANV_BO_EXTERNAL,
                                               &new_impl.bo);
         if (result != VK_SUCCESS)
            return result;

         if (new_impl.bo->size < 4096) {
            anv_bo_cache_release(device, &device->bo_cache, new_impl.bo);
            return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
         }

         /* If we're going to use this as a fence, we need to *not* have the
          * EXEC_OBJECT_ASYNC bit set.
          */
         assert(!(new_impl.bo->flags & EXEC_OBJECT_ASYNC));
      }

      /* From the Vulkan spec:
       *
       *    "Importing semaphore state from a file descriptor transfers
       *    ownership of the file descriptor from the application to the
       *    Vulkan implementation. The application must not perform any
       *    operations on the file descriptor after a successful import."
       *
       * If the import fails, we leave the file descriptor open.
       */
      close(fd);
      break;

   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
      if (device->instance->physicalDevice.has_syncobj) {
         new_impl = (struct anv_semaphore_impl) {
            .type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ,
            .syncobj = anv_gem_syncobj_create(device, 0),
         };
         if (!new_impl.syncobj)
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         if (anv_gem_syncobj_import_sync_file(device, new_impl.syncobj, fd)) {
            anv_gem_syncobj_destroy(device, new_impl.syncobj);
            return vk_errorf(device->instance, NULL,
                             VK_ERROR_INVALID_EXTERNAL_HANDLE,
                             "syncobj sync file import failed: %m");
         }
         /* Ownership of the FD is transferred to Anv. Since we don't need
          * it anymore because the associated fence has been put into a
          * syncobj, we must close the FD.
          */
         close(fd);
      } else {
         new_impl = (struct anv_semaphore_impl) {
            .type = ANV_SEMAPHORE_TYPE_SYNC_FILE,
            .fd = fd,
         };
      }
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
      anv_semaphore_impl_cleanup(device, &semaphore->temporary);
      semaphore->temporary = new_impl;
   } else {
      anv_semaphore_impl_cleanup(device, &semaphore->permanent);
      semaphore->permanent = new_impl;
   }

   return VK_SUCCESS;
}

VkResult anv_GetSemaphoreFdKHR(
    VkDevice                                    _device,
    const VkSemaphoreGetFdInfoKHR*              pGetFdInfo,
    int*                                        pFd)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pGetFdInfo->semaphore);
   VkResult result;
   int fd;

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR);

   struct anv_semaphore_impl *impl =
      semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
      &semaphore->temporary : &semaphore->permanent;

   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_BO:
      result = anv_bo_cache_export(device, &device->bo_cache, impl->bo, pFd);
      if (result != VK_SUCCESS)
         return result;
      break;

   case ANV_SEMAPHORE_TYPE_SYNC_FILE:
      /* There are two reasons why this could happen:
       *
       * 1) The user is trying to export without submitting something that
       *    signals the semaphore. If this is the case, it's their bug so
       *    what we return here doesn't matter.
       *
       * 2) The kernel didn't give us a file descriptor. The most likely
       *    reason for this is running out of file descriptors.
       */
      if (impl->fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = impl->fd;

      /* From the Vulkan 1.0.53 spec:
       *
       *    "...exporting a semaphore payload to a handle with copy
       *    transference has the same side effects on the source
       *    semaphore’s payload as executing a semaphore wait operation."
       *
       * In other words, it may still be a SYNC_FD semaphore, but it's now
       * considered to have been waited on and no longer has a sync file
       * attached.
       */
      impl->fd = -1;
      return VK_SUCCESS;

   case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
      if (pGetFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
         fd = anv_gem_syncobj_export_sync_file(device, impl->syncobj);
      } else {
         assert(pGetFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT);
         fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
      }
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
      *pFd = fd;
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Export operations have the same transference as the specified handle
    *    type’s import operations. [...] If the semaphore was using a
    *    temporarily imported payload, the semaphore’s prior permanent payload
    *    will be restored."
    */
   if (impl == &semaphore->temporary)
      anv_semaphore_impl_cleanup(device, impl);

   return VK_SUCCESS;
}