anv: prepare driver to report submission error through queues
mesa.git: src/intel/vulkan/anv_queue.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * This file implements VkQueue, VkFence, and VkSemaphore
 */

#include <fcntl.h>
#include <unistd.h>

#include "anv_private.h"
#include "vk_util.h"

#include "genxml/gen7_pack.h"

uint64_t anv_gettime_ns(void)
{
   struct timespec current;
   clock_gettime(CLOCK_MONOTONIC, &current);
   return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
}

uint64_t anv_get_absolute_timeout(uint64_t timeout)
{
   if (timeout == 0)
      return 0;
   uint64_t current_time = anv_gettime_ns();
   uint64_t max_timeout = (uint64_t) INT64_MAX - current_time;

   timeout = MIN2(max_timeout, timeout);

   return (current_time + timeout);
}

static int64_t anv_get_relative_timeout(uint64_t abs_timeout)
{
   uint64_t now = anv_gettime_ns();

   /* We don't want negative timeouts.
    *
    * DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is
    * supposed to block indefinitely for timeouts < 0. Unfortunately,
    * this was broken for a couple of kernel releases. Since there's
    * no way to know whether or not the kernel we're using is one of
    * the broken ones, the best we can do is to clamp the timeout to
    * INT64_MAX. This limits the maximum timeout from 584 years to
    * 292 years - likely not a big deal.
    */
   if (abs_timeout < now)
      return 0;

   uint64_t rel_timeout = abs_timeout - now;
   if (rel_timeout > (uint64_t) INT64_MAX)
      rel_timeout = INT64_MAX;

   return rel_timeout;
}
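
/* A minimal sketch of how the two helpers above are meant to compose
 * (hypothetical values, not driver code): a Vulkan wait with a one-second
 * timeout is pinned to an absolute deadline once, and every subsequent
 * kernel wait derives the remaining time from that deadline, so retries
 * never extend the total wait:
 *
 *    uint64_t abs = anv_get_absolute_timeout(1000000000ull);
 *    ... possibly several kernel waits later ...
 *    int64_t rel = anv_get_relative_timeout(abs);  // shrinks on each retry
 */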

VkResult
anv_queue_execbuf(struct anv_queue *queue,
                  struct drm_i915_gem_execbuffer2 *execbuf,
                  struct anv_bo **execbuf_bos)
{
   struct anv_device *device = queue->device;
   int ret = device->no_hw ? 0 : anv_gem_execbuffer(device, execbuf);
   if (ret != 0) {
      /* We don't know the real error. */
      return anv_queue_set_lost(queue, "execbuf2 failed: %m");
   }

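   /* The kernel writes the validated GPU address of each buffer back into
    * the exec object list; mirror those offsets into our anv_bo structs so
    * later submissions and the pinned-address assertion below see what the
    * kernel actually used.
    */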
   struct drm_i915_gem_exec_object2 *objects =
      (void *)(uintptr_t)execbuf->buffers_ptr;
   for (uint32_t k = 0; k < execbuf->buffer_count; k++) {
      if (execbuf_bos[k]->flags & EXEC_OBJECT_PINNED)
         assert(execbuf_bos[k]->offset == objects[k].offset);
      execbuf_bos[k]->offset = objects[k].offset;
   }

   return VK_SUCCESS;
}

VkResult
anv_queue_init(struct anv_device *device, struct anv_queue *queue)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->flags = 0;

   return VK_SUCCESS;
}

void
anv_queue_finish(struct anv_queue *queue)
{
}

VkResult
anv_queue_submit_simple_batch(struct anv_queue *queue,
                              struct anv_batch *batch)
{
   struct anv_device *device = queue->device;
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   struct anv_bo *bo;
   VkResult result = VK_SUCCESS;
   uint32_t size;

   if (batch) {
      /* Kernel driver requires 8 byte aligned batch length */
      size = align_u32(batch->next - batch->start, 8);
      result = anv_bo_pool_alloc(&device->batch_bo_pool, size, &bo);
      if (result != VK_SUCCESS)
         return result;

      memcpy(bo->map, batch->start, size);
      if (!device->info.has_llc)
         gen_flush_range(bo->map, size);
   } else {
      size = device->trivial_batch_bo->size;
      bo = device->trivial_batch_bo;
   }

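   /* Describe the single batch BO to the kernel. No relocations are used,
    * and the offset we pass is only a placement hint unless the BO is
    * pinned.
    */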
   exec2_objects[0].handle = bo->gem_handle;
   exec2_objects[0].relocation_count = 0;
   exec2_objects[0].relocs_ptr = 0;
   exec2_objects[0].alignment = 0;
   exec2_objects[0].offset = bo->offset;
   exec2_objects[0].flags = bo->flags;
   exec2_objects[0].rsvd1 = 0;
   exec2_objects[0].rsvd2 = 0;

   execbuf.buffers_ptr = (uintptr_t) exec2_objects;
   execbuf.buffer_count = 1;
   execbuf.batch_start_offset = 0;
   execbuf.batch_len = size;
   execbuf.cliprects_ptr = 0;
   execbuf.num_cliprects = 0;
   execbuf.DR1 = 0;
   execbuf.DR4 = 0;

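   /* rsvd1 carries the GEM context ID for this device. NO_RELOC promises
    * the kernel that the offsets in the exec list are already correct, and
    * HANDLE_LUT lets relocation targets name buffers by their index in the
    * exec list rather than by GEM handle.
    */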
   execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   execbuf.rsvd1 = device->context_id;
   execbuf.rsvd2 = 0;

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
      gen_print_batch(&device->decoder_ctx, bo->map,
                      bo->size, bo->offset, false);
   }

   result = anv_queue_execbuf(queue, &execbuf, &bo);
   if (result != VK_SUCCESS)
      goto fail;

   result = anv_device_wait(device, bo, INT64_MAX);

 fail:
   if (batch)
      anv_bo_pool_free(&device->batch_bo_pool, bo);

   return result;
}

VkResult anv_QueueSubmit(
    VkQueue                                     _queue,
    uint32_t                                    submitCount,
    const VkSubmitInfo*                         pSubmits,
    VkFence                                     fence)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);
   struct anv_device *device = queue->device;

   /* Query for device status prior to submitting. Technically, we don't need
    * to do this. However, if we have a client that's submitting piles of
    * garbage, we would rather break as early as possible to keep the GPU
    * hanging contained. If we don't check here, we'll either be waiting for
    * the kernel to kick us or we'll have to wait until the client waits on a
    * fence before we actually know whether or not we've hung.
    */
   VkResult result = anv_device_query_status(device);
   if (result != VK_SUCCESS)
      return result;

   /* We lock around QueueSubmit for three main reasons:
    *
    * 1) When a block pool is resized, we create a new gem handle with a
    *    different size and, in the case of surface states, possibly a
    *    different center offset but we re-use the same anv_bo struct when
    *    we do so. If this happens in the middle of setting up an execbuf,
    *    we could end up with our list of BOs out of sync with our list of
    *    gem handles.
    *
    * 2) The algorithm we use for building the list of unique buffers isn't
    *    thread-safe. While the client is supposed to synchronize around
    *    QueueSubmit, this would be extremely difficult to debug if it ever
    *    came up in the wild due to a broken app. It's better to play it
    *    safe and just lock around QueueSubmit.
    *
    * 3) The anv_cmd_buffer_execbuf function may perform relocations in
    *    userspace. Due to the fact that the surface state buffer is shared
    *    between batches, we can't afford to have that happen from multiple
    *    threads at the same time. Even though the user is supposed to
    *    ensure this doesn't happen, we play it safe as in (2) above.
    *
    * Since the only other things that ever take the device lock, such as
    * block pool resizes, happen rarely, the lock is almost never contended
    * and taking it isn't really an expensive operation in this case.
    */
   pthread_mutex_lock(&device->mutex);

   if (fence && submitCount == 0) {
      /* If we don't have any command buffers, we need to submit a dummy
       * batch to give GEM something to wait on. We could, potentially,
       * come up with something more efficient but this shouldn't be a
       * common case.
       */
      result = anv_cmd_buffer_execbuf(queue, NULL, NULL, 0, NULL, 0, fence);
      goto out;
   }

   for (uint32_t i = 0; i < submitCount; i++) {
      /* Fence for this submit. NULL for all but the last one */
      VkFence submit_fence = (i == submitCount - 1) ? fence : VK_NULL_HANDLE;

      if (pSubmits[i].commandBufferCount == 0) {
         /* If we don't have any command buffers, we need to submit a dummy
          * batch to give GEM something to wait on. We could, potentially,
          * come up with something more efficient but this shouldn't be a
          * common case.
          */
         result = anv_cmd_buffer_execbuf(queue, NULL,
                                         pSubmits[i].pWaitSemaphores,
                                         pSubmits[i].waitSemaphoreCount,
                                         pSubmits[i].pSignalSemaphores,
                                         pSubmits[i].signalSemaphoreCount,
                                         submit_fence);
         if (result != VK_SUCCESS)
            goto out;

         continue;
      }

      for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
         ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer,
                         pSubmits[i].pCommandBuffers[j]);
         assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
         assert(!anv_batch_has_error(&cmd_buffer->batch));

         /* Fence for this execbuf. NULL for all but the last one */
         VkFence execbuf_fence =
            (j == pSubmits[i].commandBufferCount - 1) ?
            submit_fence : VK_NULL_HANDLE;

         const VkSemaphore *in_semaphores = NULL, *out_semaphores = NULL;
         uint32_t num_in_semaphores = 0, num_out_semaphores = 0;
         if (j == 0) {
            /* Only the first batch gets the in semaphores */
            in_semaphores = pSubmits[i].pWaitSemaphores;
            num_in_semaphores = pSubmits[i].waitSemaphoreCount;
         }

         if (j == pSubmits[i].commandBufferCount - 1) {
            /* Only the last batch gets the out semaphores */
            out_semaphores = pSubmits[i].pSignalSemaphores;
            num_out_semaphores = pSubmits[i].signalSemaphoreCount;
         }

         result = anv_cmd_buffer_execbuf(queue, cmd_buffer,
                                         in_semaphores, num_in_semaphores,
                                         out_semaphores, num_out_semaphores,
                                         execbuf_fence);
         if (result != VK_SUCCESS)
            goto out;
      }
   }

   pthread_cond_broadcast(&device->queue_submit);

out:
   if (result != VK_SUCCESS) {
      /* In the case that something has gone wrong we may end up with an
       * inconsistent state from which it may not be trivial to recover.
       * For example, we might have computed address relocations and
       * any future attempt to re-submit this job will need to know about
       * this and avoid computing relocation addresses again.
       *
       * To avoid this sort of issue, we assume that if something went
       * wrong during submission we must already be in a really bad
       * situation anyway (such as being out of memory) and return
       * VK_ERROR_DEVICE_LOST to ensure that clients do not attempt to
       * submit the same job again to this device.
       */
      result = anv_device_set_lost(device, "vkQueueSubmit() failed");
   }

   pthread_mutex_unlock(&device->mutex);

   return result;
}

VkResult anv_QueueWaitIdle(
    VkQueue                                     _queue)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);

   if (anv_device_is_lost(queue->device))
      return VK_ERROR_DEVICE_LOST;

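   /* Submitting the trivial (no-op) batch and waiting on it drains the
    * queue: once that batch retires, everything previously submitted on
    * this context has retired too.
    */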
   return anv_queue_submit_simple_batch(queue, NULL);
}

VkResult anv_CreateFence(
    VkDevice                                    _device,
    const VkFenceCreateInfo*                    pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkFence*                                    pFence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_fence *fence;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);

   fence = vk_zalloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (fence == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (device->instance->physicalDevice.has_syncobj_wait) {
      fence->permanent.type = ANV_FENCE_TYPE_SYNCOBJ;

      uint32_t create_flags = 0;
      if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT)
         create_flags |= DRM_SYNCOBJ_CREATE_SIGNALED;

      fence->permanent.syncobj = anv_gem_syncobj_create(device, create_flags);
      if (!fence->permanent.syncobj)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   } else {
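      /* Fallback for kernels without syncobj wait support: the fence is a
       * throwaway 4k BO that rides along with the final execbuf of the
       * submission, and GEM busy-ness on that BO (see anv_GetFenceStatus)
       * is what tells us whether the fence has signaled.
       */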
      fence->permanent.type = ANV_FENCE_TYPE_BO;

      VkResult result = anv_bo_pool_alloc(&device->batch_bo_pool, 4096,
                                          &fence->permanent.bo.bo);
      if (result != VK_SUCCESS)
         return result;

      if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
         fence->permanent.bo.state = ANV_BO_FENCE_STATE_SIGNALED;
      } else {
         fence->permanent.bo.state = ANV_BO_FENCE_STATE_RESET;
      }
   }

   *pFence = anv_fence_to_handle(fence);

   return VK_SUCCESS;
}

static void
anv_fence_impl_cleanup(struct anv_device *device,
                       struct anv_fence_impl *impl)
{
   switch (impl->type) {
   case ANV_FENCE_TYPE_NONE:
      /* Dummy. Nothing to do */
      break;

   case ANV_FENCE_TYPE_BO:
      anv_bo_pool_free(&device->batch_bo_pool, impl->bo.bo);
      break;

   case ANV_FENCE_TYPE_SYNCOBJ:
      anv_gem_syncobj_destroy(device, impl->syncobj);
      break;

   case ANV_FENCE_TYPE_WSI:
      impl->fence_wsi->destroy(impl->fence_wsi);
      break;

   default:
      unreachable("Invalid fence type");
   }

   impl->type = ANV_FENCE_TYPE_NONE;
}

void anv_DestroyFence(
    VkDevice                                    _device,
    VkFence                                     _fence,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (!fence)
      return;

   anv_fence_impl_cleanup(device, &fence->temporary);
   anv_fence_impl_cleanup(device, &fence->permanent);

   vk_free2(&device->alloc, pAllocator, fence);
}

VkResult anv_ResetFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);

      /* From the Vulkan 1.0.53 spec:
       *
       *    "If any member of pFences currently has its payload imported with
       *    temporary permanence, that fence’s prior permanent payload is
       *    first restored. The remaining operations described therefore
       *    operate on the restored payload."
       */
      if (fence->temporary.type != ANV_FENCE_TYPE_NONE)
         anv_fence_impl_cleanup(device, &fence->temporary);

      struct anv_fence_impl *impl = &fence->permanent;

      switch (impl->type) {
      case ANV_FENCE_TYPE_BO:
         impl->bo.state = ANV_BO_FENCE_STATE_RESET;
         break;

      case ANV_FENCE_TYPE_SYNCOBJ:
         anv_gem_syncobj_reset(device, impl->syncobj);
         break;

      default:
         unreachable("Invalid fence type");
      }
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceStatus(
    VkDevice                                    _device,
    VkFence                                     _fence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   struct anv_fence_impl *impl =
      fence->temporary.type != ANV_FENCE_TYPE_NONE ?
      &fence->temporary : &fence->permanent;

   switch (impl->type) {
   case ANV_FENCE_TYPE_BO:
      /* BO fences don't support import/export */
      assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
      switch (impl->bo.state) {
      case ANV_BO_FENCE_STATE_RESET:
         /* If it hasn't even been sent off to the GPU yet, it's not ready */
         return VK_NOT_READY;

      case ANV_BO_FENCE_STATE_SIGNALED:
         /* It's been signaled, return success */
         return VK_SUCCESS;

      case ANV_BO_FENCE_STATE_SUBMITTED: {
         VkResult result = anv_device_bo_busy(device, impl->bo.bo);
         if (result == VK_SUCCESS) {
            impl->bo.state = ANV_BO_FENCE_STATE_SIGNALED;
            return VK_SUCCESS;
         } else {
            return result;
         }
      }
      default:
         unreachable("Invalid fence status");
      }

   case ANV_FENCE_TYPE_SYNCOBJ: {
      int ret = anv_gem_syncobj_wait(device, &impl->syncobj, 1, 0, true);
      if (ret == -1) {
         if (errno == ETIME) {
            return VK_NOT_READY;
         } else {
            /* We don't know the real error. */
            return anv_device_set_lost(device, "drm_syncobj_wait failed: %m");
         }
      } else {
         return VK_SUCCESS;
      }
   }

   default:
      unreachable("Invalid fence type");
   }
}

static VkResult
anv_wait_for_syncobj_fences(struct anv_device *device,
                            uint32_t fenceCount,
                            const VkFence *pFences,
                            bool waitAll,
                            uint64_t abs_timeout_ns)
{
   uint32_t *syncobjs = vk_zalloc(&device->alloc,
                                  sizeof(*syncobjs) * fenceCount, 8,
                                  VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!syncobjs)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      assert(fence->permanent.type == ANV_FENCE_TYPE_SYNCOBJ);

      struct anv_fence_impl *impl =
         fence->temporary.type != ANV_FENCE_TYPE_NONE ?
         &fence->temporary : &fence->permanent;

      assert(impl->type == ANV_FENCE_TYPE_SYNCOBJ);
      syncobjs[i] = impl->syncobj;
   }

   /* The gem_syncobj_wait ioctl may return early due to an inherent
    * limitation in the way it computes timeouts. Loop until we've actually
    * passed the timeout.
    */
   int ret;
   do {
      ret = anv_gem_syncobj_wait(device, syncobjs, fenceCount,
                                 abs_timeout_ns, waitAll);
   } while (ret == -1 && errno == ETIME && anv_gettime_ns() < abs_timeout_ns);

   vk_free(&device->alloc, syncobjs);

   if (ret == -1) {
      if (errno == ETIME) {
         return VK_TIMEOUT;
      } else {
         /* We don't know the real error. */
         return anv_device_set_lost(device, "drm_syncobj_wait failed: %m");
      }
   } else {
      return VK_SUCCESS;
   }
}

static VkResult
anv_wait_for_bo_fences(struct anv_device *device,
                       uint32_t fenceCount,
                       const VkFence *pFences,
                       bool waitAll,
                       uint64_t abs_timeout_ns)
{
   VkResult result = VK_SUCCESS;
   uint32_t pending_fences = fenceCount;
   while (pending_fences) {
      pending_fences = 0;
      bool signaled_fences = false;
      for (uint32_t i = 0; i < fenceCount; i++) {
         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);

         /* This function assumes that all fences are BO fences and that they
          * have no temporary state. Since BO fences will never be exported,
          * this should be a safe assumption.
          */
         assert(fence->permanent.type == ANV_FENCE_TYPE_BO);
         assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
         struct anv_fence_impl *impl = &fence->permanent;

         switch (impl->bo.state) {
         case ANV_BO_FENCE_STATE_RESET:
            /* This fence hasn't been submitted yet, we'll catch it the next
             * time around. Yes, this may mean we dead-loop but, short of
             * lots of locking and a condition variable, there's not much
             * that we can do about that.
             */
            pending_fences++;
            continue;

         case ANV_BO_FENCE_STATE_SIGNALED:
            /* This fence is not pending. If waitAll isn't set, we can return
             * early. Otherwise, we have to keep going.
             */
            if (!waitAll) {
               result = VK_SUCCESS;
               goto done;
            }
            continue;

         case ANV_BO_FENCE_STATE_SUBMITTED:
            /* These are the fences we really care about. Go ahead and wait
             * on them until we hit a timeout.
             */
            result = anv_device_wait(device, impl->bo.bo,
                                     anv_get_relative_timeout(abs_timeout_ns));
            switch (result) {
            case VK_SUCCESS:
               impl->bo.state = ANV_BO_FENCE_STATE_SIGNALED;
               signaled_fences = true;
               if (!waitAll)
                  goto done;
               break;

            case VK_TIMEOUT:
               goto done;

            default:
               return result;
            }
         }
      }

      if (pending_fences && !signaled_fences) {
         /* If we've hit this then someone decided to vkWaitForFences before
          * they've actually submitted any of them to a queue. This is a
          * fairly pessimal case, so it's ok to lock here and use a standard
          * pthreads condition variable.
          */
         pthread_mutex_lock(&device->mutex);

         /* It's possible that some of the fences have changed state since
          * the last time we checked. Now that we have the lock, check for
          * pending fences again and don't wait if it's changed.
          */
         uint32_t now_pending_fences = 0;
         for (uint32_t i = 0; i < fenceCount; i++) {
            ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
            if (fence->permanent.bo.state == ANV_BO_FENCE_STATE_RESET)
               now_pending_fences++;
         }
         assert(now_pending_fences <= pending_fences);

         if (now_pending_fences == pending_fences) {
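            /* Note: abs_timeout_ns comes from CLOCK_MONOTONIC; this assumes
             * device->queue_submit was initialized with a monotonic clock
             * attribute at device creation so that the timedwait below
             * compares against the same clock.
             */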
            struct timespec abstime = {
               .tv_sec = abs_timeout_ns / NSEC_PER_SEC,
               .tv_nsec = abs_timeout_ns % NSEC_PER_SEC,
            };

            ASSERTED int ret;
            ret = pthread_cond_timedwait(&device->queue_submit,
                                         &device->mutex, &abstime);
            assert(ret != EINVAL);
            if (anv_gettime_ns() >= abs_timeout_ns) {
               pthread_mutex_unlock(&device->mutex);
               result = VK_TIMEOUT;
               goto done;
            }
         }

         pthread_mutex_unlock(&device->mutex);
      }
   }

 done:
   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   return result;
}

static VkResult
anv_wait_for_wsi_fence(struct anv_device *device,
                       const VkFence _fence,
                       uint64_t abs_timeout)
{
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   struct anv_fence_impl *impl = &fence->permanent;

   return impl->fence_wsi->wait(impl->fence_wsi, abs_timeout);
}

static VkResult
anv_wait_for_fences(struct anv_device *device,
                    uint32_t fenceCount,
                    const VkFence *pFences,
                    bool waitAll,
                    uint64_t abs_timeout)
{
   VkResult result = VK_SUCCESS;

   if (fenceCount <= 1 || waitAll) {
      for (uint32_t i = 0; i < fenceCount; i++) {
         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
         switch (fence->permanent.type) {
         case ANV_FENCE_TYPE_BO:
            result = anv_wait_for_bo_fences(device, 1, &pFences[i],
                                            true, abs_timeout);
            break;
         case ANV_FENCE_TYPE_SYNCOBJ:
            result = anv_wait_for_syncobj_fences(device, 1, &pFences[i],
                                                 true, abs_timeout);
            break;
         case ANV_FENCE_TYPE_WSI:
            result = anv_wait_for_wsi_fence(device, pFences[i], abs_timeout);
            break;
         case ANV_FENCE_TYPE_NONE:
            result = VK_SUCCESS;
            break;
         }
         if (result != VK_SUCCESS)
            return result;
      }
   } else {
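      /* Mixed fence types with waitAll == false: there is no single kernel
       * wait that covers heterogeneous primitives, so fall back to polling
       * each fence with a zero timeout until one of them signals or we pass
       * the deadline.
       */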
      do {
         for (uint32_t i = 0; i < fenceCount; i++) {
            if (anv_wait_for_fences(device, 1, &pFences[i], true, 0) == VK_SUCCESS)
               return VK_SUCCESS;
         }
      } while (anv_gettime_ns() < abs_timeout);
      result = VK_TIMEOUT;
   }
   return result;
}

static bool anv_all_fences_syncobj(uint32_t fenceCount, const VkFence *pFences)
{
   for (uint32_t i = 0; i < fenceCount; ++i) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      if (fence->permanent.type != ANV_FENCE_TYPE_SYNCOBJ)
         return false;
   }
   return true;
}

static bool anv_all_fences_bo(uint32_t fenceCount, const VkFence *pFences)
{
   for (uint32_t i = 0; i < fenceCount; ++i) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      if (fence->permanent.type != ANV_FENCE_TYPE_BO)
         return false;
   }
   return true;
}

VkResult anv_WaitForFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences,
    VkBool32                                    waitAll,
    uint64_t                                    timeout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   uint64_t abs_timeout = anv_get_absolute_timeout(timeout);
   if (anv_all_fences_syncobj(fenceCount, pFences)) {
      return anv_wait_for_syncobj_fences(device, fenceCount, pFences,
                                         waitAll, abs_timeout);
   } else if (anv_all_fences_bo(fenceCount, pFences)) {
      return anv_wait_for_bo_fences(device, fenceCount, pFences,
                                    waitAll, abs_timeout);
   } else {
      return anv_wait_for_fences(device, fenceCount, pFences,
                                 waitAll, abs_timeout);
   }
}

void anv_GetPhysicalDeviceExternalFenceProperties(
    VkPhysicalDevice                            physicalDevice,
    const VkPhysicalDeviceExternalFenceInfo*    pExternalFenceInfo,
    VkExternalFenceProperties*                  pExternalFenceProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   switch (pExternalFenceInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
      if (device->has_syncobj_wait) {
         pExternalFenceProperties->exportFromImportedHandleTypes =
            VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT |
            VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalFenceProperties->compatibleHandleTypes =
            VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT |
            VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalFenceProperties->externalFenceFeatures =
            VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT;
         return;
      }
      break;

   default:
      break;
   }

   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

VkResult anv_ImportFenceFdKHR(
    VkDevice                                    _device,
    const VkImportFenceFdInfoKHR*               pImportFenceFdInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, pImportFenceFdInfo->fence);
   int fd = pImportFenceFdInfo->fd;

   assert(pImportFenceFdInfo->sType ==
          VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR);

   struct anv_fence_impl new_impl = {
      .type = ANV_FENCE_TYPE_NONE,
   };

   switch (pImportFenceFdInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
      new_impl.type = ANV_FENCE_TYPE_SYNCOBJ;

      new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
      if (!new_impl.syncobj)
         return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);

      break;

   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
      /* Sync files are a bit tricky. Because we want to continue using the
       * syncobj implementation of WaitForFences, we don't use the sync file
       * directly but instead import it into a syncobj.
       */
      new_impl.type = ANV_FENCE_TYPE_SYNCOBJ;

      new_impl.syncobj = anv_gem_syncobj_create(device, 0);
      if (!new_impl.syncobj)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      if (anv_gem_syncobj_import_sync_file(device, new_impl.syncobj, fd)) {
         anv_gem_syncobj_destroy(device, new_impl.syncobj);
         return vk_errorf(device->instance, NULL,
                          VK_ERROR_INVALID_EXTERNAL_HANDLE,
                          "syncobj sync file import failed: %m");
      }
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Importing a fence payload from a file descriptor transfers
    *    ownership of the file descriptor from the application to the
    *    Vulkan implementation. The application must not perform any
    *    operations on the file descriptor after a successful import."
    *
    * If the import fails, we leave the file descriptor open.
    */
   close(fd);

   if (pImportFenceFdInfo->flags & VK_FENCE_IMPORT_TEMPORARY_BIT) {
      anv_fence_impl_cleanup(device, &fence->temporary);
      fence->temporary = new_impl;
   } else {
      anv_fence_impl_cleanup(device, &fence->permanent);
      fence->permanent = new_impl;
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceFdKHR(
    VkDevice                                    _device,
    const VkFenceGetFdInfoKHR*                  pGetFdInfo,
    int*                                        pFd)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, pGetFdInfo->fence);

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR);

   struct anv_fence_impl *impl =
      fence->temporary.type != ANV_FENCE_TYPE_NONE ?
      &fence->temporary : &fence->permanent;

   assert(impl->type == ANV_FENCE_TYPE_SYNCOBJ);
   switch (pGetFdInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT: {
      int fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = fd;
      break;
   }

   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT: {
      int fd = anv_gem_syncobj_export_sync_file(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = fd;
      break;
   }

   default:
      unreachable("Invalid fence export handle type");
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Export operations have the same transference as the specified
    *    handle type’s import operations. [...] If the fence was using a
    *    temporarily imported payload, the fence’s prior permanent payload
    *    will be restored."
    */
   if (impl == &fence->temporary)
      anv_fence_impl_cleanup(device, impl);

   return VK_SUCCESS;
}

// Queue semaphore functions

VkResult anv_CreateSemaphore(
    VkDevice                                    _device,
    const VkSemaphoreCreateInfo*                pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkSemaphore*                                pSemaphore)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_semaphore *semaphore;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO);

   semaphore = vk_alloc2(&device->alloc, pAllocator, sizeof(*semaphore), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (semaphore == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   const VkExportSemaphoreCreateInfo *export =
      vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO);
   VkExternalSemaphoreHandleTypeFlags handleTypes =
      export ? export->handleTypes : 0;

   if (handleTypes == 0) {
      /* The DRM execbuffer ioctl always executes in order so long as you
       * stay on the same ring. Since we don't expose the blit engine as a
       * DMA queue, a dummy no-op semaphore is a perfectly valid
       * implementation.
       */
      semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DUMMY;
   } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) {
      assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT);
      if (device->instance->physicalDevice.has_syncobj) {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
         semaphore->permanent.syncobj = anv_gem_syncobj_create(device, 0);
         if (!semaphore->permanent.syncobj) {
            vk_free2(&device->alloc, pAllocator, semaphore);
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         }
      } else {
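         /* No syncobj support: fall back to implicit sync. The semaphore
          * becomes a shareable dummy BO; listing it in both the signaling
          * and the waiting execbuf makes the kernel order the two
          * submissions for us.
          */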
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_BO;
         VkResult result = anv_device_alloc_bo(device, 4096,
                                               ANV_BO_ALLOC_EXTERNAL |
                                               ANV_BO_ALLOC_IMPLICIT_SYNC,
                                               &semaphore->permanent.bo);
         if (result != VK_SUCCESS) {
            vk_free2(&device->alloc, pAllocator, semaphore);
            return result;
         }

         /* If we're going to use this as a fence, we need to *not* have the
          * EXEC_OBJECT_ASYNC bit set.
          */
         assert(!(semaphore->permanent.bo->flags & EXEC_OBJECT_ASYNC));
      }
   } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
      assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT);
      if (device->instance->physicalDevice.has_syncobj) {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
         semaphore->permanent.syncobj = anv_gem_syncobj_create(device, 0);
      } else {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_SYNC_FILE;
         semaphore->permanent.fd = -1;
      }
   } else {
      assert(!"Unknown handle type");
      vk_free2(&device->alloc, pAllocator, semaphore);
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   semaphore->temporary.type = ANV_SEMAPHORE_TYPE_NONE;

   *pSemaphore = anv_semaphore_to_handle(semaphore);

   return VK_SUCCESS;
}

static void
anv_semaphore_impl_cleanup(struct anv_device *device,
                           struct anv_semaphore_impl *impl)
{
   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_NONE:
   case ANV_SEMAPHORE_TYPE_DUMMY:
      /* Dummy. Nothing to do */
      break;

   case ANV_SEMAPHORE_TYPE_BO:
      anv_device_release_bo(device, impl->bo);
      break;

   case ANV_SEMAPHORE_TYPE_SYNC_FILE:
      close(impl->fd);
      break;

   case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
      anv_gem_syncobj_destroy(device, impl->syncobj);
      break;

   default:
      unreachable("Invalid semaphore type");
   }

   impl->type = ANV_SEMAPHORE_TYPE_NONE;
}

void
anv_semaphore_reset_temporary(struct anv_device *device,
                              struct anv_semaphore *semaphore)
{
   if (semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE)
      return;

   anv_semaphore_impl_cleanup(device, &semaphore->temporary);
}

void anv_DestroySemaphore(
    VkDevice                                    _device,
    VkSemaphore                                 _semaphore,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, _semaphore);

   if (semaphore == NULL)
      return;

   anv_semaphore_impl_cleanup(device, &semaphore->temporary);
   anv_semaphore_impl_cleanup(device, &semaphore->permanent);

   vk_free2(&device->alloc, pAllocator, semaphore);
}

void anv_GetPhysicalDeviceExternalSemaphoreProperties(
    VkPhysicalDevice                            physicalDevice,
    const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
    VkExternalSemaphoreProperties*              pExternalSemaphoreProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   switch (pExternalSemaphoreInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
      pExternalSemaphoreProperties->exportFromImportedHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
      pExternalSemaphoreProperties->compatibleHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
      pExternalSemaphoreProperties->externalSemaphoreFeatures =
         VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
         VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
      return;

   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
      if (device->has_exec_fence) {
         pExternalSemaphoreProperties->exportFromImportedHandleTypes =
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalSemaphoreProperties->compatibleHandleTypes =
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalSemaphoreProperties->externalSemaphoreFeatures =
            VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
         return;
      }
      break;

   default:
      break;
   }

   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

VkResult anv_ImportSemaphoreFdKHR(
    VkDevice                                    _device,
    const VkImportSemaphoreFdInfoKHR*           pImportSemaphoreFdInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pImportSemaphoreFdInfo->semaphore);
   int fd = pImportSemaphoreFdInfo->fd;

   struct anv_semaphore_impl new_impl = {
      .type = ANV_SEMAPHORE_TYPE_NONE,
   };

   switch (pImportSemaphoreFdInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
      if (device->instance->physicalDevice.has_syncobj) {
         new_impl.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;

         new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
         if (!new_impl.syncobj)
            return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
      } else {
         new_impl.type = ANV_SEMAPHORE_TYPE_BO;

         VkResult result = anv_device_import_bo(device, fd,
                                                ANV_BO_ALLOC_EXTERNAL |
                                                ANV_BO_ALLOC_IMPLICIT_SYNC,
                                                &new_impl.bo);
         if (result != VK_SUCCESS)
            return result;

         if (new_impl.bo->size < 4096) {
            anv_device_release_bo(device, new_impl.bo);
            return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
         }

         /* If we're going to use this as a fence, we need to *not* have the
          * EXEC_OBJECT_ASYNC bit set.
          */
         assert(!(new_impl.bo->flags & EXEC_OBJECT_ASYNC));
      }

      /* From the Vulkan spec:
       *
       *    "Importing semaphore state from a file descriptor transfers
       *    ownership of the file descriptor from the application to the
       *    Vulkan implementation. The application must not perform any
       *    operations on the file descriptor after a successful import."
       *
       * If the import fails, we leave the file descriptor open.
       */
      close(fd);
      break;

   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
      if (device->instance->physicalDevice.has_syncobj) {
         new_impl = (struct anv_semaphore_impl) {
            .type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ,
            .syncobj = anv_gem_syncobj_create(device, 0),
         };
         if (!new_impl.syncobj)
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         if (anv_gem_syncobj_import_sync_file(device, new_impl.syncobj, fd)) {
            anv_gem_syncobj_destroy(device, new_impl.syncobj);
            return vk_errorf(device->instance, NULL,
                             VK_ERROR_INVALID_EXTERNAL_HANDLE,
                             "syncobj sync file import failed: %m");
         }
         /* Ownership of the FD is transferred to Anv. Since we don't need
          * it anymore because the associated fence has been put into a
          * syncobj, we must close the FD.
          */
         close(fd);
      } else {
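         /* Keep the raw sync file FD; the submit path is expected to hand
          * it to the kernel as an input fence and then close it, matching
          * the copy transference of sync files.
          */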
         new_impl = (struct anv_semaphore_impl) {
            .type = ANV_SEMAPHORE_TYPE_SYNC_FILE,
            .fd = fd,
         };
      }
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
      anv_semaphore_impl_cleanup(device, &semaphore->temporary);
      semaphore->temporary = new_impl;
   } else {
      anv_semaphore_impl_cleanup(device, &semaphore->permanent);
      semaphore->permanent = new_impl;
   }

   return VK_SUCCESS;
}

VkResult anv_GetSemaphoreFdKHR(
    VkDevice                                    _device,
    const VkSemaphoreGetFdInfoKHR*              pGetFdInfo,
    int*                                        pFd)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pGetFdInfo->semaphore);
   VkResult result;
   int fd;

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR);

   struct anv_semaphore_impl *impl =
      semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
      &semaphore->temporary : &semaphore->permanent;

   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_BO:
      result = anv_device_export_bo(device, impl->bo, pFd);
      if (result != VK_SUCCESS)
         return result;
      break;

   case ANV_SEMAPHORE_TYPE_SYNC_FILE:
      /* There are two reasons why this could happen:
       *
       * 1) The user is trying to export without submitting something that
       *    signals the semaphore. If this is the case, it's their bug so
       *    what we return here doesn't matter.
       *
       * 2) The kernel didn't give us a file descriptor. The most likely
       *    reason for this is running out of file descriptors.
       */
      if (impl->fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = impl->fd;

      /* From the Vulkan 1.0.53 spec:
       *
       *    "...exporting a semaphore payload to a handle with copy
       *    transference has the same side effects on the source
       *    semaphore’s payload as executing a semaphore wait operation."
       *
       * In other words, it may still be a SYNC_FD semaphore, but it's now
       * considered to have been waited on and no longer has a sync file
       * attached.
       */
      impl->fd = -1;
      return VK_SUCCESS;

   case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
      if (pGetFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT)
         fd = anv_gem_syncobj_export_sync_file(device, impl->syncobj);
      else {
         assert(pGetFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT);
         fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
      }
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
      *pFd = fd;
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Export operations have the same transference as the specified
    *    handle type’s import operations. [...] If the semaphore was using a
    *    temporarily imported payload, the semaphore’s prior permanent
    *    payload will be restored."
    */
   if (impl == &semaphore->temporary)
      anv_semaphore_impl_cleanup(device, impl);

   return VK_SUCCESS;
}