/* mesa.git: src/intel/vulkan/anv_queue.c */
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * This file implements VkQueue, VkFence, and VkSemaphore
 */

#include <fcntl.h>
#include <unistd.h>

#include "anv_private.h"
#include "vk_util.h"

#include "genxml/gen7_pack.h"

uint64_t anv_gettime_ns(void)
{
   struct timespec current;
   clock_gettime(CLOCK_MONOTONIC, &current);
   return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
}

uint64_t anv_get_absolute_timeout(uint64_t timeout)
{
   if (timeout == 0)
      return 0;
   uint64_t current_time = anv_gettime_ns();
   uint64_t max_timeout = (uint64_t) INT64_MAX - current_time;

   timeout = MIN2(max_timeout, timeout);

   return (current_time + timeout);
}

static int64_t anv_get_relative_timeout(uint64_t abs_timeout)
{
   uint64_t now = anv_gettime_ns();

   /* We don't want negative timeouts.
    *
    * DRM_IOCTL_I915_GEM_WAIT uses a signed 64-bit timeout and is
    * supposed to block indefinitely for timeouts < 0. Unfortunately,
    * this was broken for a couple of kernel releases. Since there's
    * no way to know whether or not the kernel we're using is one of
    * the broken ones, the best we can do is to clamp the timeout to
    * INT64_MAX. This limits the maximum timeout from 584 years to
    * 292 years - likely not a big deal.
    */
   if (abs_timeout < now)
      return 0;

   uint64_t rel_timeout = abs_timeout - now;
   if (rel_timeout > (uint64_t) INT64_MAX)
      rel_timeout = INT64_MAX;

   return rel_timeout;
}
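
/* Illustrative sketch (hypothetical caller, not part of this file) of how
 * the two helpers above compose: convert the relative timeout the API hands
 * us into an absolute deadline once, then re-derive a clamped relative
 * timeout for each kernel wait so that retries never extend the deadline.
 * some_kernel_wait() is an invented stand-in for an ioctl wrapper.
 *
 *    uint64_t deadline = anv_get_absolute_timeout(timeout_ns);
 *    int ret;
 *    do {
 *       ret = some_kernel_wait(anv_get_relative_timeout(deadline));
 *    } while (ret == -1 && errno == ETIME && anv_gettime_ns() < deadline);
 */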

VkResult
anv_device_execbuf(struct anv_device *device,
                   struct drm_i915_gem_execbuffer2 *execbuf,
                   struct anv_bo **execbuf_bos)
{
   int ret = device->no_hw ? 0 : anv_gem_execbuffer(device, execbuf);
   if (ret != 0) {
      /* We don't know the real error. */
      return anv_device_set_lost(device, "execbuf2 failed: %m");
   }

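   /* On success the kernel writes the final, validated GPU offset of every
    * exec object back into the array we passed in; record those offsets on
    * our anv_bo structs so later submits start from up-to-date addresses.
    */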
   struct drm_i915_gem_exec_object2 *objects =
      (void *)(uintptr_t)execbuf->buffers_ptr;
   for (uint32_t k = 0; k < execbuf->buffer_count; k++) {
      if (execbuf_bos[k]->flags & EXEC_OBJECT_PINNED)
         assert(execbuf_bos[k]->offset == objects[k].offset);
      execbuf_bos[k]->offset = objects[k].offset;
   }

   return VK_SUCCESS;
}

VkResult
anv_device_submit_simple_batch(struct anv_device *device,
                               struct anv_batch *batch)
{
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   struct anv_bo *bo;
   VkResult result = VK_SUCCESS;
   uint32_t size;

   /* Kernel driver requires 8 byte aligned batch length */
   size = align_u32(batch->next - batch->start, 8);
   result = anv_bo_pool_alloc(&device->batch_bo_pool, size, &bo);
   if (result != VK_SUCCESS)
      return result;

   memcpy(bo->map, batch->start, size);
   if (!device->info.has_llc)
      gen_flush_range(bo->map, size);

   exec2_objects[0].handle = bo->gem_handle;
   exec2_objects[0].relocation_count = 0;
   exec2_objects[0].relocs_ptr = 0;
   exec2_objects[0].alignment = 0;
   exec2_objects[0].offset = bo->offset;
   exec2_objects[0].flags = bo->flags;
   exec2_objects[0].rsvd1 = 0;
   exec2_objects[0].rsvd2 = 0;

   execbuf.buffers_ptr = (uintptr_t) exec2_objects;
   execbuf.buffer_count = 1;
   execbuf.batch_start_offset = 0;
   execbuf.batch_len = size;
   execbuf.cliprects_ptr = 0;
   execbuf.num_cliprects = 0;
   execbuf.DR1 = 0;
   execbuf.DR4 = 0;

   execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   execbuf.rsvd1 = device->context_id;
   execbuf.rsvd2 = 0;

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
      gen_print_batch(&device->decoder_ctx, bo->map,
                      bo->size, bo->offset, false);
   }

   result = anv_device_execbuf(device, &execbuf, &bo);
   if (result != VK_SUCCESS)
      goto fail;

   result = anv_device_wait(device, bo, INT64_MAX);

 fail:
   anv_bo_pool_free(&device->batch_bo_pool, bo);

   return result;
}
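
/* Illustrative sketch (hypothetical caller, not part of this file): the
 * batch handed in is expected to already end in MI_BATCH_BUFFER_END,
 * roughly:
 *
 *    uint32_t data[64];
 *    struct anv_batch batch = {
 *       .start = data, .next = data, .end = data + ARRAY_SIZE(data),
 *    };
 *    anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
 *    VkResult res = anv_device_submit_simple_batch(device, &batch);
 */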

VkResult anv_QueueSubmit(
    VkQueue                                     _queue,
    uint32_t                                    submitCount,
    const VkSubmitInfo*                         pSubmits,
    VkFence                                     fence)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);
   struct anv_device *device = queue->device;

   /* Query for device status prior to submitting. Technically, we don't need
    * to do this. However, if we have a client that's submitting piles of
    * garbage, we would rather break as early as possible to keep the GPU
    * hanging contained. If we don't check here, we'll either be waiting for
    * the kernel to kick us or we'll have to wait until the client waits on a
    * fence before we actually know whether or not we've hung.
    */
   VkResult result = anv_device_query_status(device);
   if (result != VK_SUCCESS)
      return result;

   /* We lock around QueueSubmit for three main reasons:
    *
    * 1) When a block pool is resized, we create a new gem handle with a
    *    different size and, in the case of surface states, possibly a
    *    different center offset but we re-use the same anv_bo struct when
    *    we do so. If this happens in the middle of setting up an execbuf,
    *    we could end up with our list of BOs out of sync with our list of
    *    gem handles.
    *
    * 2) The algorithm we use for building the list of unique buffers isn't
    *    thread-safe. While the client is supposed to synchronize around
    *    QueueSubmit, this would be extremely difficult to debug if it ever
    *    came up in the wild due to a broken app. It's better to play it
    *    safe and just lock around QueueSubmit.
    *
    * 3) The anv_cmd_buffer_execbuf function may perform relocations in
    *    userspace. Due to the fact that the surface state buffer is shared
    *    between batches, we can't afford to have that happen from multiple
    *    threads at the same time. Even though the user is supposed to
    *    ensure this doesn't happen, we play it safe as in (2) above.
    *
    * Since the only other things that ever take the device lock, such as
    * block pool resizes, happen only rarely, this will almost never be
    * contended, so taking the lock isn't really an expensive operation in
    * this case.
    */
   pthread_mutex_lock(&device->mutex);

   if (fence && submitCount == 0) {
      /* If we don't have any command buffers, we need to submit a dummy
       * batch to give GEM something to wait on. We could, potentially,
       * come up with something more efficient but this shouldn't be a
       * common case.
       */
      result = anv_cmd_buffer_execbuf(device, NULL, NULL, 0, NULL, 0, fence);
      goto out;
   }

   for (uint32_t i = 0; i < submitCount; i++) {
      /* Fence for this submit. NULL for all but the last one */
      VkFence submit_fence = (i == submitCount - 1) ? fence : VK_NULL_HANDLE;

      if (pSubmits[i].commandBufferCount == 0) {
         /* If we don't have any command buffers, we need to submit a dummy
          * batch to give GEM something to wait on. We could, potentially,
          * come up with something more efficient but this shouldn't be a
          * common case.
          */
         result = anv_cmd_buffer_execbuf(device, NULL,
                                         pSubmits[i].pWaitSemaphores,
                                         pSubmits[i].waitSemaphoreCount,
                                         pSubmits[i].pSignalSemaphores,
                                         pSubmits[i].signalSemaphoreCount,
                                         submit_fence);
         if (result != VK_SUCCESS)
            goto out;

         continue;
      }

      for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
         ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer,
                         pSubmits[i].pCommandBuffers[j]);
         assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
         assert(!anv_batch_has_error(&cmd_buffer->batch));

         /* Fence for this execbuf. NULL for all but the last one */
         VkFence execbuf_fence =
            (j == pSubmits[i].commandBufferCount - 1) ?
            submit_fence : VK_NULL_HANDLE;

         const VkSemaphore *in_semaphores = NULL, *out_semaphores = NULL;
         uint32_t num_in_semaphores = 0, num_out_semaphores = 0;
         if (j == 0) {
            /* Only the first batch gets the in semaphores */
            in_semaphores = pSubmits[i].pWaitSemaphores;
            num_in_semaphores = pSubmits[i].waitSemaphoreCount;
         }

         if (j == pSubmits[i].commandBufferCount - 1) {
            /* Only the last batch gets the out semaphores */
            out_semaphores = pSubmits[i].pSignalSemaphores;
            num_out_semaphores = pSubmits[i].signalSemaphoreCount;
         }

         result = anv_cmd_buffer_execbuf(device, cmd_buffer,
                                         in_semaphores, num_in_semaphores,
                                         out_semaphores, num_out_semaphores,
                                         execbuf_fence);
         if (result != VK_SUCCESS)
            goto out;
      }
   }

   pthread_cond_broadcast(&device->queue_submit);

 out:
   if (result != VK_SUCCESS) {
      /* In the case that something has gone wrong we may end up with an
       * inconsistent state from which it may not be trivial to recover.
       * For example, we might have computed address relocations and
       * any future attempt to re-submit this job will need to know about
       * this and avoid computing relocation addresses again.
       *
       * To avoid this sort of issue, we assume that if something went
       * wrong during submission we must already be in a really bad situation
       * anyway (such as being out of memory) and return
       * VK_ERROR_DEVICE_LOST to ensure that clients do not attempt to
       * submit the same job again to this device.
       */
      result = anv_device_set_lost(device, "vkQueueSubmit() failed");
   }

   pthread_mutex_unlock(&device->mutex);

   return result;
}
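
/* Hypothetical app-side sketch (names invented, not part of this file) of
 * the semantics implemented above: within one VkSubmitInfo only the first
 * command buffer waits on pWaitSemaphores, only the last one signals
 * pSignalSemaphores, and the fence is attached to the very last execbuf of
 * the whole call.
 *
 *    VkPipelineStageFlags wait_stage =
 *       VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
 *    VkSubmitInfo info = {
 *       .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
 *       .waitSemaphoreCount = 1,
 *       .pWaitSemaphores = &acquire_sem,
 *       .pWaitDstStageMask = &wait_stage,
 *       .commandBufferCount = 2,
 *       .pCommandBuffers = cmd_bufs,
 *       .signalSemaphoreCount = 1,
 *       .pSignalSemaphores = &release_sem,
 *    };
 *    vkQueueSubmit(queue, 1, &info, fence);
 */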

VkResult anv_QueueWaitIdle(
    VkQueue                                     _queue)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);

   return anv_DeviceWaitIdle(anv_device_to_handle(queue->device));
}

VkResult anv_CreateFence(
    VkDevice                                    _device,
    const VkFenceCreateInfo*                    pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkFence*                                    pFence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_fence *fence;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);

   fence = vk_zalloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (fence == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (device->instance->physicalDevice.has_syncobj_wait) {
      fence->permanent.type = ANV_FENCE_TYPE_SYNCOBJ;

      uint32_t create_flags = 0;
      if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT)
         create_flags |= DRM_SYNCOBJ_CREATE_SIGNALED;

      fence->permanent.syncobj = anv_gem_syncobj_create(device, create_flags);
      if (!fence->permanent.syncobj)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   } else {
      fence->permanent.type = ANV_FENCE_TYPE_BO;

      VkResult result = anv_bo_pool_alloc(&device->batch_bo_pool, 4096,
                                          &fence->permanent.bo.bo);
      if (result != VK_SUCCESS)
         return result;

      if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
         fence->permanent.bo.state = ANV_BO_FENCE_STATE_SIGNALED;
      } else {
         fence->permanent.bo.state = ANV_BO_FENCE_STATE_RESET;
      }
   }

   *pFence = anv_fence_to_handle(fence);

   return VK_SUCCESS;
}
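
/* Note: with VK_FENCE_CREATE_SIGNALED_BIT the fence starts out signaled on
 * both backends above (DRM_SYNCOBJ_CREATE_SIGNALED for syncobjs,
 * ANV_BO_FENCE_STATE_SIGNALED for BO fences). Hypothetical app-side usage
 * (names invented):
 *
 *    const VkFenceCreateInfo ci = {
 *       .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
 *       .flags = VK_FENCE_CREATE_SIGNALED_BIT,
 *    };
 *    VkFence fence;
 *    vkCreateFence(device_h, &ci, NULL, &fence);
 */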

static void
anv_fence_impl_cleanup(struct anv_device *device,
                       struct anv_fence_impl *impl)
{
   switch (impl->type) {
   case ANV_FENCE_TYPE_NONE:
      /* Dummy. Nothing to do */
      break;

   case ANV_FENCE_TYPE_BO:
      anv_bo_pool_free(&device->batch_bo_pool, impl->bo.bo);
      break;

   case ANV_FENCE_TYPE_SYNCOBJ:
      anv_gem_syncobj_destroy(device, impl->syncobj);
      break;

   case ANV_FENCE_TYPE_WSI:
      impl->fence_wsi->destroy(impl->fence_wsi);
      break;

   default:
      unreachable("Invalid fence type");
   }

   impl->type = ANV_FENCE_TYPE_NONE;
}

void anv_DestroyFence(
    VkDevice                                    _device,
    VkFence                                     _fence,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (!fence)
      return;

   anv_fence_impl_cleanup(device, &fence->temporary);
   anv_fence_impl_cleanup(device, &fence->permanent);

   vk_free2(&device->alloc, pAllocator, fence);
}

VkResult anv_ResetFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);

      /* From the Vulkan 1.0.53 spec:
       *
       *    "If any member of pFences currently has its payload imported with
       *    temporary permanence, that fence’s prior permanent payload is
       *    first restored. The remaining operations described therefore
       *    operate on the restored payload."
       */
      if (fence->temporary.type != ANV_FENCE_TYPE_NONE)
         anv_fence_impl_cleanup(device, &fence->temporary);

      struct anv_fence_impl *impl = &fence->permanent;

      switch (impl->type) {
      case ANV_FENCE_TYPE_BO:
         impl->bo.state = ANV_BO_FENCE_STATE_RESET;
         break;

      case ANV_FENCE_TYPE_SYNCOBJ:
         anv_gem_syncobj_reset(device, impl->syncobj);
         break;

      default:
         unreachable("Invalid fence type");
      }
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceStatus(
    VkDevice                                    _device,
    VkFence                                     _fence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   struct anv_fence_impl *impl =
      fence->temporary.type != ANV_FENCE_TYPE_NONE ?
      &fence->temporary : &fence->permanent;

   switch (impl->type) {
   case ANV_FENCE_TYPE_BO:
      /* BO fences don't support import/export */
      assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
      switch (impl->bo.state) {
      case ANV_BO_FENCE_STATE_RESET:
         /* If it hasn't even been sent off to the GPU yet, it's not ready */
         return VK_NOT_READY;

      case ANV_BO_FENCE_STATE_SIGNALED:
         /* It's been signaled, return success */
         return VK_SUCCESS;

      case ANV_BO_FENCE_STATE_SUBMITTED: {
         VkResult result = anv_device_bo_busy(device, impl->bo.bo);
         if (result == VK_SUCCESS) {
            impl->bo.state = ANV_BO_FENCE_STATE_SIGNALED;
            return VK_SUCCESS;
         } else {
            return result;
         }
      }
      default:
         unreachable("Invalid fence status");
      }

   case ANV_FENCE_TYPE_SYNCOBJ: {
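      /* Waiting with a zero timeout turns the wait into a non-blocking
       * poll: ETIME here just means "not signaled yet", not a real timeout.
       */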
      int ret = anv_gem_syncobj_wait(device, &impl->syncobj, 1, 0, true);
      if (ret == -1) {
         if (errno == ETIME) {
            return VK_NOT_READY;
         } else {
            /* We don't know the real error. */
            return anv_device_set_lost(device, "drm_syncobj_wait failed: %m");
         }
      } else {
         return VK_SUCCESS;
      }
   }

   default:
      unreachable("Invalid fence type");
   }
}

static VkResult
anv_wait_for_syncobj_fences(struct anv_device *device,
                            uint32_t fenceCount,
                            const VkFence *pFences,
                            bool waitAll,
                            uint64_t abs_timeout_ns)
{
   uint32_t *syncobjs = vk_zalloc(&device->alloc,
                                  sizeof(*syncobjs) * fenceCount, 8,
                                  VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!syncobjs)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      assert(fence->permanent.type == ANV_FENCE_TYPE_SYNCOBJ);

      struct anv_fence_impl *impl =
         fence->temporary.type != ANV_FENCE_TYPE_NONE ?
         &fence->temporary : &fence->permanent;

      assert(impl->type == ANV_FENCE_TYPE_SYNCOBJ);
      syncobjs[i] = impl->syncobj;
   }

   /* The gem_syncobj_wait ioctl may return early due to an inherent
    * limitation in the way it computes timeouts. Loop until we've actually
    * passed the timeout.
    */
   int ret;
   do {
      ret = anv_gem_syncobj_wait(device, syncobjs, fenceCount,
                                 abs_timeout_ns, waitAll);
   } while (ret == -1 && errno == ETIME && anv_gettime_ns() < abs_timeout_ns);

   vk_free(&device->alloc, syncobjs);

   if (ret == -1) {
      if (errno == ETIME) {
         return VK_TIMEOUT;
      } else {
         /* We don't know the real error. */
         return anv_device_set_lost(device, "drm_syncobj_wait failed: %m");
      }
   } else {
      return VK_SUCCESS;
   }
}

static VkResult
anv_wait_for_bo_fences(struct anv_device *device,
                       uint32_t fenceCount,
                       const VkFence *pFences,
                       bool waitAll,
                       uint64_t abs_timeout_ns)
{
   VkResult result = VK_SUCCESS;
   uint32_t pending_fences = fenceCount;
   while (pending_fences) {
      pending_fences = 0;
      bool signaled_fences = false;
      for (uint32_t i = 0; i < fenceCount; i++) {
         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);

         /* This function assumes that all fences are BO fences and that they
          * have no temporary state. Since BO fences will never be exported,
          * this should be a safe assumption.
          */
         assert(fence->permanent.type == ANV_FENCE_TYPE_BO);
         assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
         struct anv_fence_impl *impl = &fence->permanent;

         switch (impl->bo.state) {
         case ANV_BO_FENCE_STATE_RESET:
            /* This fence hasn't been submitted yet, we'll catch it the next
             * time around. Yes, this may mean we dead-loop but, short of
             * lots of locking and a condition variable, there's not much that
             * we can do about that.
             */
            pending_fences++;
            continue;

         case ANV_BO_FENCE_STATE_SIGNALED:
            /* This fence is not pending. If waitAll isn't set, we can return
             * early. Otherwise, we have to keep going.
             */
            if (!waitAll) {
               result = VK_SUCCESS;
               goto done;
            }
            continue;

         case ANV_BO_FENCE_STATE_SUBMITTED:
            /* These are the fences we really care about. Go ahead and wait
             * on them until we hit a timeout.
             */
            result = anv_device_wait(device, impl->bo.bo,
                                     anv_get_relative_timeout(abs_timeout_ns));
            switch (result) {
            case VK_SUCCESS:
               impl->bo.state = ANV_BO_FENCE_STATE_SIGNALED;
               signaled_fences = true;
               if (!waitAll)
                  goto done;
               break;

            case VK_TIMEOUT:
               goto done;

            default:
               return result;
            }
         }
      }

      if (pending_fences && !signaled_fences) {
         /* If we've hit this then someone decided to vkWaitForFences before
          * they've actually submitted any of them to a queue. This is a
          * fairly pessimal case, so it's ok to lock here and use a standard
          * pthreads condition variable.
          */
         pthread_mutex_lock(&device->mutex);

         /* It's possible that some of the fences have changed state since the
          * last time we checked. Now that we have the lock, check for
          * pending fences again and don't wait if it's changed.
          */
         uint32_t now_pending_fences = 0;
         for (uint32_t i = 0; i < fenceCount; i++) {
            ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
            if (fence->permanent.bo.state == ANV_BO_FENCE_STATE_RESET)
               now_pending_fences++;
         }
         assert(now_pending_fences <= pending_fences);

         if (now_pending_fences == pending_fences) {
            struct timespec abstime = {
               .tv_sec = abs_timeout_ns / NSEC_PER_SEC,
               .tv_nsec = abs_timeout_ns % NSEC_PER_SEC,
            };

            ASSERTED int ret;
            ret = pthread_cond_timedwait(&device->queue_submit,
                                         &device->mutex, &abstime);
            assert(ret != EINVAL);
            if (anv_gettime_ns() >= abs_timeout_ns) {
               pthread_mutex_unlock(&device->mutex);
               result = VK_TIMEOUT;
               goto done;
            }
         }

         pthread_mutex_unlock(&device->mutex);
      }
   }

 done:
   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   return result;
}

static VkResult
anv_wait_for_wsi_fence(struct anv_device *device,
                       const VkFence _fence,
                       uint64_t abs_timeout)
{
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   struct anv_fence_impl *impl = &fence->permanent;

   return impl->fence_wsi->wait(impl->fence_wsi, abs_timeout);
}

static VkResult
anv_wait_for_fences(struct anv_device *device,
                    uint32_t fenceCount,
                    const VkFence *pFences,
                    bool waitAll,
                    uint64_t abs_timeout)
{
   VkResult result = VK_SUCCESS;

   if (fenceCount <= 1 || waitAll) {
      for (uint32_t i = 0; i < fenceCount; i++) {
         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
         switch (fence->permanent.type) {
         case ANV_FENCE_TYPE_BO:
            result = anv_wait_for_bo_fences(device, 1, &pFences[i],
                                            true, abs_timeout);
            break;
         case ANV_FENCE_TYPE_SYNCOBJ:
            result = anv_wait_for_syncobj_fences(device, 1, &pFences[i],
                                                 true, abs_timeout);
            break;
         case ANV_FENCE_TYPE_WSI:
            result = anv_wait_for_wsi_fence(device, pFences[i], abs_timeout);
            break;
         case ANV_FENCE_TYPE_NONE:
            result = VK_SUCCESS;
            break;
         }
         if (result != VK_SUCCESS)
            return result;
      }
   } else {
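      /* Wait-any over a mix of fence types has no single kernel primitive
       * backing it, so poll each fence with a zero timeout until one of
       * them signals or the absolute deadline passes.
       */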
      do {
         for (uint32_t i = 0; i < fenceCount; i++) {
            if (anv_wait_for_fences(device, 1, &pFences[i], true, 0) == VK_SUCCESS)
               return VK_SUCCESS;
         }
      } while (anv_gettime_ns() < abs_timeout);
      result = VK_TIMEOUT;
   }
   return result;
}

static bool anv_all_fences_syncobj(uint32_t fenceCount, const VkFence *pFences)
{
   for (uint32_t i = 0; i < fenceCount; ++i) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      if (fence->permanent.type != ANV_FENCE_TYPE_SYNCOBJ)
         return false;
   }
   return true;
}

static bool anv_all_fences_bo(uint32_t fenceCount, const VkFence *pFences)
{
   for (uint32_t i = 0; i < fenceCount; ++i) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      if (fence->permanent.type != ANV_FENCE_TYPE_BO)
         return false;
   }
   return true;
}

VkResult anv_WaitForFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences,
    VkBool32                                    waitAll,
    uint64_t                                    timeout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   uint64_t abs_timeout = anv_get_absolute_timeout(timeout);
   if (anv_all_fences_syncobj(fenceCount, pFences)) {
      return anv_wait_for_syncobj_fences(device, fenceCount, pFences,
                                         waitAll, abs_timeout);
   } else if (anv_all_fences_bo(fenceCount, pFences)) {
      return anv_wait_for_bo_fences(device, fenceCount, pFences,
                                    waitAll, abs_timeout);
   } else {
      return anv_wait_for_fences(device, fenceCount, pFences,
                                 waitAll, abs_timeout);
   }
}

void anv_GetPhysicalDeviceExternalFenceProperties(
    VkPhysicalDevice                            physicalDevice,
    const VkPhysicalDeviceExternalFenceInfo*    pExternalFenceInfo,
    VkExternalFenceProperties*                  pExternalFenceProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   switch (pExternalFenceInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
      if (device->has_syncobj_wait) {
         pExternalFenceProperties->exportFromImportedHandleTypes =
            VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT |
            VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalFenceProperties->compatibleHandleTypes =
            VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT |
            VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalFenceProperties->externalFenceFeatures =
            VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT;
         return;
      }
      break;

   default:
      break;
   }

   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

VkResult anv_ImportFenceFdKHR(
    VkDevice                                    _device,
    const VkImportFenceFdInfoKHR*               pImportFenceFdInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, pImportFenceFdInfo->fence);
   int fd = pImportFenceFdInfo->fd;

   assert(pImportFenceFdInfo->sType ==
          VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR);

   struct anv_fence_impl new_impl = {
      .type = ANV_FENCE_TYPE_NONE,
   };

   switch (pImportFenceFdInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
      new_impl.type = ANV_FENCE_TYPE_SYNCOBJ;

      new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
      if (!new_impl.syncobj)
         return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);

      break;

   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
      /* Sync files are a bit tricky. Because we want to continue using the
       * syncobj implementation of WaitForFences, we don't use the sync file
       * directly but instead import it into a syncobj.
       */
      new_impl.type = ANV_FENCE_TYPE_SYNCOBJ;

      new_impl.syncobj = anv_gem_syncobj_create(device, 0);
      if (!new_impl.syncobj)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      if (anv_gem_syncobj_import_sync_file(device, new_impl.syncobj, fd)) {
         anv_gem_syncobj_destroy(device, new_impl.syncobj);
         return vk_errorf(device->instance, NULL,
                          VK_ERROR_INVALID_EXTERNAL_HANDLE,
                          "syncobj sync file import failed: %m");
      }
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Importing a fence payload from a file descriptor transfers
    *    ownership of the file descriptor from the application to the
    *    Vulkan implementation. The application must not perform any
    *    operations on the file descriptor after a successful import."
    *
    * If the import fails, we leave the file descriptor open.
    */
   close(fd);

   if (pImportFenceFdInfo->flags & VK_FENCE_IMPORT_TEMPORARY_BIT) {
      anv_fence_impl_cleanup(device, &fence->temporary);
      fence->temporary = new_impl;
   } else {
      anv_fence_impl_cleanup(device, &fence->permanent);
      fence->permanent = new_impl;
   }

   return VK_SUCCESS;
}
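
/* Hypothetical usage sketch (names invented, not part of this file):
 * importing a sync file with temporary permanence, as a compositor client
 * might do each frame. The temporary payload is consumed by the next
 * wait/reset, after which the permanent payload is restored.
 *
 *    const VkImportFenceFdInfoKHR import = {
 *       .sType = VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR,
 *       .fence = fence,
 *       .flags = VK_FENCE_IMPORT_TEMPORARY_BIT,
 *       .handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT,
 *       .fd = sync_fd,
 *    };
 *    vkImportFenceFdKHR(device_h, &import);
 */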

VkResult anv_GetFenceFdKHR(
    VkDevice                                    _device,
    const VkFenceGetFdInfoKHR*                  pGetFdInfo,
    int*                                        pFd)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, pGetFdInfo->fence);

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR);

   struct anv_fence_impl *impl =
      fence->temporary.type != ANV_FENCE_TYPE_NONE ?
      &fence->temporary : &fence->permanent;

   assert(impl->type == ANV_FENCE_TYPE_SYNCOBJ);
   switch (pGetFdInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT: {
      int fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = fd;
      break;
   }

   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT: {
      int fd = anv_gem_syncobj_export_sync_file(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = fd;
      break;
   }

   default:
      unreachable("Invalid fence export handle type");
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Export operations have the same transference as the specified handle
    *    type’s import operations. [...] If the fence was using a
    *    temporarily imported payload, the fence’s prior permanent payload
    *    will be restored."
    */
   if (impl == &fence->temporary)
      anv_fence_impl_cleanup(device, impl);

   return VK_SUCCESS;
}

// Queue semaphore functions

VkResult anv_CreateSemaphore(
    VkDevice                                    _device,
    const VkSemaphoreCreateInfo*                pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkSemaphore*                                pSemaphore)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_semaphore *semaphore;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO);

   semaphore = vk_alloc2(&device->alloc, pAllocator, sizeof(*semaphore), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (semaphore == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   const VkExportSemaphoreCreateInfo *export =
      vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO);
   VkExternalSemaphoreHandleTypeFlags handleTypes =
      export ? export->handleTypes : 0;

   if (handleTypes == 0) {
      /* The DRM execbuffer ioctl always executes in-order so long as you stay
       * on the same ring. Since we don't expose the blit engine as a DMA
       * queue, a dummy no-op semaphore is a perfectly valid implementation.
       */
      semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DUMMY;
   } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) {
      assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT);
      if (device->instance->physicalDevice.has_syncobj) {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
         semaphore->permanent.syncobj = anv_gem_syncobj_create(device, 0);
         if (!semaphore->permanent.syncobj) {
            vk_free2(&device->alloc, pAllocator, semaphore);
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         }
      } else {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_BO;
         VkResult result = anv_device_alloc_bo(device, 4096,
                                               ANV_BO_ALLOC_EXTERNAL |
                                               ANV_BO_ALLOC_IMPLICIT_SYNC,
                                               &semaphore->permanent.bo);
         if (result != VK_SUCCESS) {
            vk_free2(&device->alloc, pAllocator, semaphore);
            return result;
         }

         /* If we're going to use this as a fence, we need to *not* have the
          * EXEC_OBJECT_ASYNC bit set.
          */
         assert(!(semaphore->permanent.bo->flags & EXEC_OBJECT_ASYNC));
      }
   } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
      assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT);
      if (device->instance->physicalDevice.has_syncobj) {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
         semaphore->permanent.syncobj = anv_gem_syncobj_create(device, 0);
      } else {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_SYNC_FILE;
         semaphore->permanent.fd = -1;
      }
   } else {
      assert(!"Unknown handle type");
      vk_free2(&device->alloc, pAllocator, semaphore);
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   semaphore->temporary.type = ANV_SEMAPHORE_TYPE_NONE;

   *pSemaphore = anv_semaphore_to_handle(semaphore);

   return VK_SUCCESS;
}
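
/* Hypothetical creation-time sketch (names invented, not part of this
 * file): requesting an exportable semaphore so that one of the external
 * paths above is taken instead of the dummy implementation.
 *
 *    const VkExportSemaphoreCreateInfo export_info = {
 *       .sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO,
 *       .handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
 *    };
 *    const VkSemaphoreCreateInfo ci = {
 *       .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
 *       .pNext = &export_info,
 *    };
 *    VkSemaphore sem;
 *    vkCreateSemaphore(device_h, &ci, NULL, &sem);
 */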

static void
anv_semaphore_impl_cleanup(struct anv_device *device,
                           struct anv_semaphore_impl *impl)
{
   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_NONE:
   case ANV_SEMAPHORE_TYPE_DUMMY:
      /* Dummy. Nothing to do */
      break;

   case ANV_SEMAPHORE_TYPE_BO:
      anv_device_release_bo(device, impl->bo);
      break;

   case ANV_SEMAPHORE_TYPE_SYNC_FILE:
      close(impl->fd);
      break;

   case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
      anv_gem_syncobj_destroy(device, impl->syncobj);
      break;

   default:
      unreachable("Invalid semaphore type");
   }

   impl->type = ANV_SEMAPHORE_TYPE_NONE;
}

void
anv_semaphore_reset_temporary(struct anv_device *device,
                              struct anv_semaphore *semaphore)
{
   if (semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE)
      return;

   anv_semaphore_impl_cleanup(device, &semaphore->temporary);
}

void anv_DestroySemaphore(
    VkDevice                                    _device,
    VkSemaphore                                 _semaphore,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, _semaphore);

   if (semaphore == NULL)
      return;

   anv_semaphore_impl_cleanup(device, &semaphore->temporary);
   anv_semaphore_impl_cleanup(device, &semaphore->permanent);

   vk_free2(&device->alloc, pAllocator, semaphore);
}

void anv_GetPhysicalDeviceExternalSemaphoreProperties(
    VkPhysicalDevice                            physicalDevice,
    const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
    VkExternalSemaphoreProperties*              pExternalSemaphoreProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   switch (pExternalSemaphoreInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
      pExternalSemaphoreProperties->exportFromImportedHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
      pExternalSemaphoreProperties->compatibleHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
      pExternalSemaphoreProperties->externalSemaphoreFeatures =
         VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
         VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
      return;

   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
      if (device->has_exec_fence) {
         pExternalSemaphoreProperties->exportFromImportedHandleTypes =
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalSemaphoreProperties->compatibleHandleTypes =
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalSemaphoreProperties->externalSemaphoreFeatures =
            VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
         return;
      }
      break;

   default:
      break;
   }

   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

VkResult anv_ImportSemaphoreFdKHR(
    VkDevice                                    _device,
    const VkImportSemaphoreFdInfoKHR*           pImportSemaphoreFdInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pImportSemaphoreFdInfo->semaphore);
   int fd = pImportSemaphoreFdInfo->fd;

   struct anv_semaphore_impl new_impl = {
      .type = ANV_SEMAPHORE_TYPE_NONE,
   };

   switch (pImportSemaphoreFdInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
      if (device->instance->physicalDevice.has_syncobj) {
         new_impl.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;

         new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
         if (!new_impl.syncobj)
            return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
      } else {
         new_impl.type = ANV_SEMAPHORE_TYPE_BO;

         VkResult result = anv_device_import_bo(device, fd,
                                                ANV_BO_ALLOC_EXTERNAL |
                                                ANV_BO_ALLOC_IMPLICIT_SYNC,
                                                &new_impl.bo);
         if (result != VK_SUCCESS)
            return result;

         if (new_impl.bo->size < 4096) {
            anv_device_release_bo(device, new_impl.bo);
            return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
         }

         /* If we're going to use this as a fence, we need to *not* have the
          * EXEC_OBJECT_ASYNC bit set.
          */
         assert(!(new_impl.bo->flags & EXEC_OBJECT_ASYNC));
      }

      /* From the Vulkan spec:
       *
       *    "Importing semaphore state from a file descriptor transfers
       *    ownership of the file descriptor from the application to the
       *    Vulkan implementation. The application must not perform any
       *    operations on the file descriptor after a successful import."
       *
       * If the import fails, we leave the file descriptor open.
       */
      close(fd);
      break;

   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
      if (device->instance->physicalDevice.has_syncobj) {
         new_impl = (struct anv_semaphore_impl) {
            .type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ,
            .syncobj = anv_gem_syncobj_create(device, 0),
         };
         if (!new_impl.syncobj)
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         if (anv_gem_syncobj_import_sync_file(device, new_impl.syncobj, fd)) {
            anv_gem_syncobj_destroy(device, new_impl.syncobj);
            return vk_errorf(device->instance, NULL,
                             VK_ERROR_INVALID_EXTERNAL_HANDLE,
                             "syncobj sync file import failed: %m");
         }
         /* Ownership of the FD is transferred to Anv. Since we don't need it
          * anymore because the associated fence has been put into a syncobj,
          * we must close the FD.
          */
         close(fd);
      } else {
         new_impl = (struct anv_semaphore_impl) {
            .type = ANV_SEMAPHORE_TYPE_SYNC_FILE,
            .fd = fd,
         };
      }
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
      anv_semaphore_impl_cleanup(device, &semaphore->temporary);
      semaphore->temporary = new_impl;
   } else {
      anv_semaphore_impl_cleanup(device, &semaphore->permanent);
      semaphore->permanent = new_impl;
   }

   return VK_SUCCESS;
}

VkResult anv_GetSemaphoreFdKHR(
    VkDevice                                    _device,
    const VkSemaphoreGetFdInfoKHR*              pGetFdInfo,
    int*                                        pFd)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pGetFdInfo->semaphore);
   VkResult result;
   int fd;

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR);

   struct anv_semaphore_impl *impl =
      semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
      &semaphore->temporary : &semaphore->permanent;

   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_BO:
      result = anv_device_export_bo(device, impl->bo, pFd);
      if (result != VK_SUCCESS)
         return result;
      break;

   case ANV_SEMAPHORE_TYPE_SYNC_FILE:
      /* There are two reasons why this could happen:
       *
       * 1) The user is trying to export without submitting something that
       *    signals the semaphore. If this is the case, it's their bug so
       *    what we return here doesn't matter.
       *
       * 2) The kernel didn't give us a file descriptor. The most likely
       *    reason for this is running out of file descriptors.
       */
      if (impl->fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = impl->fd;

      /* From the Vulkan 1.0.53 spec:
       *
       *    "...exporting a semaphore payload to a handle with copy
       *    transference has the same side effects on the source
       *    semaphore’s payload as executing a semaphore wait operation."
       *
       * In other words, it may still be a SYNC_FD semaphore, but it's now
       * considered to have been waited on and no longer has a sync file
       * attached.
       */
      impl->fd = -1;
      return VK_SUCCESS;

   case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
      if (pGetFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT)
         fd = anv_gem_syncobj_export_sync_file(device, impl->syncobj);
      else {
         assert(pGetFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT);
         fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
      }
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
      *pFd = fd;
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Export operations have the same transference as the specified handle
    *    type’s import operations. [...] If the semaphore was using a
    *    temporarily imported payload, the semaphore’s prior permanent payload
    *    will be restored."
    */
   if (impl == &semaphore->temporary)
      anv_semaphore_impl_cleanup(device, impl);

   return VK_SUCCESS;
}