anv: Return VK_ERROR_DEVICE_LOST from anv_device_set_lost
[mesa.git] src/intel/vulkan/anv_queue.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * This file implements VkQueue, VkFence, and VkSemaphore
 */

#include <fcntl.h>
#include <unistd.h>
#include <sys/eventfd.h>

#include "anv_private.h"
#include "vk_util.h"

#include "genxml/gen7_pack.h"

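/* Thin wrapper around DRM_IOCTL_I915_GEM_EXECBUFFER2.  On success, the
 * kernel has written the final GPU offset of every exec object back into
 * the ioctl payload, and we copy those offsets into our anv_bo structs so
 * that future relocations start from up-to-date addresses.  On failure we
 * have no way to recover, so we mark the whole device lost.
 */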
VkResult
anv_device_execbuf(struct anv_device *device,
                   struct drm_i915_gem_execbuffer2 *execbuf,
                   struct anv_bo **execbuf_bos)
{
   int ret = device->no_hw ? 0 : anv_gem_execbuffer(device, execbuf);
   if (ret != 0) {
      /* We don't know the real error. */
      return anv_device_set_lost(device, "execbuf2 failed: %m");
   }

   struct drm_i915_gem_exec_object2 *objects =
      (void *)(uintptr_t)execbuf->buffers_ptr;
   for (uint32_t k = 0; k < execbuf->buffer_count; k++) {
      if (execbuf_bos[k]->flags & EXEC_OBJECT_PINNED)
         assert(execbuf_bos[k]->offset == objects[k].offset);
      execbuf_bos[k]->offset = objects[k].offset;
   }

   return VK_SUCCESS;
}

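/* Copy a batch into a freshly allocated BO from the batch pool, submit it
 * on the render ring, and synchronously wait for it to complete.  This is
 * typically used for small one-off internal batches rather than client
 * command buffers.
 */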
VkResult
anv_device_submit_simple_batch(struct anv_device *device,
                               struct anv_batch *batch)
{
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   struct anv_bo bo, *exec_bos[1];
   VkResult result = VK_SUCCESS;
   uint32_t size;

   /* Kernel driver requires 8 byte aligned batch length */
   size = align_u32(batch->next - batch->start, 8);
   result = anv_bo_pool_alloc(&device->batch_bo_pool, &bo, size);
   if (result != VK_SUCCESS)
      return result;

   memcpy(bo.map, batch->start, size);
   if (!device->info.has_llc)
      gen_flush_range(bo.map, size);

   exec_bos[0] = &bo;
   exec2_objects[0].handle = bo.gem_handle;
   exec2_objects[0].relocation_count = 0;
   exec2_objects[0].relocs_ptr = 0;
   exec2_objects[0].alignment = 0;
   exec2_objects[0].offset = bo.offset;
   exec2_objects[0].flags = bo.flags;
   exec2_objects[0].rsvd1 = 0;
   exec2_objects[0].rsvd2 = 0;

   execbuf.buffers_ptr = (uintptr_t) exec2_objects;
   execbuf.buffer_count = 1;
   execbuf.batch_start_offset = 0;
   execbuf.batch_len = size;
   execbuf.cliprects_ptr = 0;
   execbuf.num_cliprects = 0;
   execbuf.DR1 = 0;
   execbuf.DR4 = 0;

   execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   execbuf.rsvd1 = device->context_id;
   execbuf.rsvd2 = 0;

   result = anv_device_execbuf(device, &execbuf, exec_bos);
   if (result != VK_SUCCESS)
      goto fail;

   result = anv_device_wait(device, &bo, INT64_MAX);

fail:
   anv_bo_pool_free(&device->batch_bo_pool, &bo);

   return result;
}

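/* vkQueueSubmit implementation.  Note that Vulkan allows a fence-only
 * submit with no work attached; a client may do, e.g.:
 *
 *    vkQueueSubmit(queue, 0, NULL, fence);
 *
 * so we handle submitCount == 0 (and commandBufferCount == 0) by kicking
 * off a dummy execbuf that gives GEM something for the fence to wait on.
 */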
VkResult anv_QueueSubmit(
    VkQueue                                     _queue,
    uint32_t                                    submitCount,
    const VkSubmitInfo*                         pSubmits,
    VkFence                                     fence)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);
   struct anv_device *device = queue->device;

   /* Query for device status prior to submitting.  Technically, we don't
    * need to do this.  However, if we have a client that's submitting piles
    * of garbage, we would rather break as early as possible to keep the GPU
    * hanging contained.  If we don't check here, we'll either be waiting
    * for the kernel to kick us or we'll have to wait until the client waits
    * on a fence before we actually know whether or not we've hung.
    */
   VkResult result = anv_device_query_status(device);
   if (result != VK_SUCCESS)
      return result;

   /* We lock around QueueSubmit for three main reasons:
    *
    *  1) When a block pool is resized, we create a new gem handle with a
    *     different size and, in the case of surface states, possibly a
    *     different center offset but we re-use the same anv_bo struct when
    *     we do so.  If this happens in the middle of setting up an execbuf,
    *     we could end up with our list of BOs out of sync with our list of
    *     gem handles.
    *
    *  2) The algorithm we use for building the list of unique buffers isn't
    *     thread-safe.  While the client is supposed to synchronize around
    *     QueueSubmit, this would be extremely difficult to debug if it ever
    *     came up in the wild due to a broken app.  It's better to play it
    *     safe and just lock around QueueSubmit.
    *
    *  3) The anv_cmd_buffer_execbuf function may perform relocations in
    *     userspace.  Due to the fact that the surface state buffer is
    *     shared between batches, we can't afford to have that happen from
    *     multiple threads at the same time.  Even though the user is
    *     supposed to ensure this doesn't happen, we play it safe as in (2)
    *     above.
    *
    * Since the only other things that ever take the device lock, such as
    * block pool resizes, happen rarely, the lock will almost never be
    * contended, so taking it isn't an expensive operation in this case.
    */
   pthread_mutex_lock(&device->mutex);

   if (fence && submitCount == 0) {
      /* If we don't have any command buffers, we need to submit a dummy
       * batch to give GEM something to wait on.  We could, potentially,
       * come up with something more efficient but this shouldn't be a
       * common case.
       */
      result = anv_cmd_buffer_execbuf(device, NULL, NULL, 0, NULL, 0, fence);
      goto out;
   }

   for (uint32_t i = 0; i < submitCount; i++) {
      /* Fence for this submit.  NULL for all but the last one */
      VkFence submit_fence = (i == submitCount - 1) ? fence : VK_NULL_HANDLE;

      if (pSubmits[i].commandBufferCount == 0) {
         /* If we don't have any command buffers, we need to submit a dummy
          * batch to give GEM something to wait on.  We could, potentially,
          * come up with something more efficient but this shouldn't be a
          * common case.
          */
         result = anv_cmd_buffer_execbuf(device, NULL,
                                         pSubmits[i].pWaitSemaphores,
                                         pSubmits[i].waitSemaphoreCount,
                                         pSubmits[i].pSignalSemaphores,
                                         pSubmits[i].signalSemaphoreCount,
                                         submit_fence);
         if (result != VK_SUCCESS)
            goto out;

         continue;
      }

      for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
         ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer,
                         pSubmits[i].pCommandBuffers[j]);
         assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
         assert(!anv_batch_has_error(&cmd_buffer->batch));

         /* Fence for this execbuf.  NULL for all but the last one */
         VkFence execbuf_fence =
            (j == pSubmits[i].commandBufferCount - 1) ?
            submit_fence : VK_NULL_HANDLE;

         const VkSemaphore *in_semaphores = NULL, *out_semaphores = NULL;
         uint32_t num_in_semaphores = 0, num_out_semaphores = 0;
         if (j == 0) {
            /* Only the first batch gets the in semaphores */
            in_semaphores = pSubmits[i].pWaitSemaphores;
            num_in_semaphores = pSubmits[i].waitSemaphoreCount;
         }

         if (j == pSubmits[i].commandBufferCount - 1) {
            /* Only the last batch gets the out semaphores */
            out_semaphores = pSubmits[i].pSignalSemaphores;
            num_out_semaphores = pSubmits[i].signalSemaphoreCount;
         }

         result = anv_cmd_buffer_execbuf(device, cmd_buffer,
                                         in_semaphores, num_in_semaphores,
                                         out_semaphores, num_out_semaphores,
                                         execbuf_fence);
         if (result != VK_SUCCESS)
            goto out;
      }
   }

   pthread_cond_broadcast(&device->queue_submit);

out:
   if (result != VK_SUCCESS) {
      /* In the case that something has gone wrong we may end up with an
       * inconsistent state from which it may not be trivial to recover.
       * For example, we might have computed address relocations and any
       * future attempt to re-submit this job will need to know about this
       * and avoid computing relocation addresses again.
       *
       * To avoid this sort of issue, we assume that if something went
       * wrong during submission we must already be in a really bad
       * situation anyway (such as being out of memory) and return
       * VK_ERROR_DEVICE_LOST to ensure that clients do not attempt to
       * submit the same job again to this device.
       */
      result = anv_device_set_lost(device, "vkQueueSubmit() failed");
   }

   pthread_mutex_unlock(&device->mutex);

   return result;
}

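/* We only advertise a single hardware queue, so waiting for one queue to
 * drain is equivalent to waiting for the whole device to go idle.
 */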
VkResult anv_QueueWaitIdle(
    VkQueue                                     _queue)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);

   return anv_DeviceWaitIdle(anv_device_to_handle(queue->device));
}

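/* Fences are backed by one of two implementations: a DRM syncobj when the
 * kernel supports the syncobj-wait ioctl, or a plain GEM BO with a small
 * driver-side state machine (RESET -> SUBMITTED -> SIGNALED) on older
 * kernels.
 */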
VkResult anv_CreateFence(
    VkDevice                                    _device,
    const VkFenceCreateInfo*                    pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkFence*                                    pFence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_fence *fence;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);

   fence = vk_zalloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (fence == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (device->instance->physicalDevice.has_syncobj_wait) {
      fence->permanent.type = ANV_FENCE_TYPE_SYNCOBJ;

      uint32_t create_flags = 0;
      if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT)
         create_flags |= DRM_SYNCOBJ_CREATE_SIGNALED;

      fence->permanent.syncobj = anv_gem_syncobj_create(device, create_flags);
      if (!fence->permanent.syncobj) {
         /* Don't leak the anv_fence wrapper if backend creation fails. */
         vk_free2(&device->alloc, pAllocator, fence);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }
   } else {
      fence->permanent.type = ANV_FENCE_TYPE_BO;

      VkResult result = anv_bo_pool_alloc(&device->batch_bo_pool,
                                          &fence->permanent.bo.bo, 4096);
      if (result != VK_SUCCESS) {
         vk_free2(&device->alloc, pAllocator, fence);
         return result;
      }

      if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
         fence->permanent.bo.state = ANV_BO_FENCE_STATE_SIGNALED;
      } else {
         fence->permanent.bo.state = ANV_BO_FENCE_STATE_RESET;
      }
   }

   *pFence = anv_fence_to_handle(fence);

   return VK_SUCCESS;
}

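/* Free whatever primitive the given fence payload owns and mark the
 * payload unused.
 */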
static void
anv_fence_impl_cleanup(struct anv_device *device,
                       struct anv_fence_impl *impl)
{
   switch (impl->type) {
   case ANV_FENCE_TYPE_NONE:
      /* Dummy.  Nothing to do */
      break;

   case ANV_FENCE_TYPE_BO:
      anv_bo_pool_free(&device->batch_bo_pool, &impl->bo.bo);
      break;

   case ANV_FENCE_TYPE_SYNCOBJ:
      anv_gem_syncobj_destroy(device, impl->syncobj);
      break;

   case ANV_FENCE_TYPE_WSI:
      impl->fence_wsi->destroy(impl->fence_wsi);
      break;

   default:
      unreachable("Invalid fence type");
   }

   impl->type = ANV_FENCE_TYPE_NONE;
}

void anv_DestroyFence(
    VkDevice                                    _device,
    VkFence                                     _fence,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (!fence)
      return;

   anv_fence_impl_cleanup(device, &fence->temporary);
   anv_fence_impl_cleanup(device, &fence->permanent);

   vk_free2(&device->alloc, pAllocator, fence);
}

VkResult anv_ResetFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);

      /* From the Vulkan 1.0.53 spec:
       *
       *    "If any member of pFences currently has its payload imported with
       *    temporary permanence, that fence’s prior permanent payload is
       *    first restored. The remaining operations described therefore
       *    operate on the restored payload."
       */
      if (fence->temporary.type != ANV_FENCE_TYPE_NONE)
         anv_fence_impl_cleanup(device, &fence->temporary);

      struct anv_fence_impl *impl = &fence->permanent;

      switch (impl->type) {
      case ANV_FENCE_TYPE_BO:
         impl->bo.state = ANV_BO_FENCE_STATE_RESET;
         break;

      case ANV_FENCE_TYPE_SYNCOBJ:
         anv_gem_syncobj_reset(device, impl->syncobj);
         break;

      default:
         unreachable("Invalid fence type");
      }
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceStatus(
    VkDevice                                    _device,
    VkFence                                     _fence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   struct anv_fence_impl *impl =
      fence->temporary.type != ANV_FENCE_TYPE_NONE ?
      &fence->temporary : &fence->permanent;

   switch (impl->type) {
   case ANV_FENCE_TYPE_BO:
      /* BO fences don't support import/export */
      assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
      switch (impl->bo.state) {
      case ANV_BO_FENCE_STATE_RESET:
         /* If it hasn't even been sent off to the GPU yet, it's not ready */
         return VK_NOT_READY;

      case ANV_BO_FENCE_STATE_SIGNALED:
         /* It's been signaled, return success */
         return VK_SUCCESS;

      case ANV_BO_FENCE_STATE_SUBMITTED: {
         VkResult result = anv_device_bo_busy(device, &impl->bo.bo);
         if (result == VK_SUCCESS) {
            impl->bo.state = ANV_BO_FENCE_STATE_SIGNALED;
            return VK_SUCCESS;
         } else {
            return result;
         }
      }
      default:
         unreachable("Invalid fence status");
      }

   case ANV_FENCE_TYPE_SYNCOBJ: {
      int ret = anv_gem_syncobj_wait(device, &impl->syncobj, 1, 0, true);
      if (ret == -1) {
         if (errno == ETIME) {
            return VK_NOT_READY;
         } else {
            /* We don't know the real error. */
            return anv_device_set_lost(device, "drm_syncobj_wait failed: %m");
         }
      } else {
         return VK_SUCCESS;
      }
   }

   default:
      unreachable("Invalid fence type");
   }
}

#define NSEC_PER_SEC 1000000000
#define INT_TYPE_MAX(type) ((1ull << (sizeof(type) * 8 - 1)) - 1)

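/* INT_TYPE_MAX(type) evaluates to the maximum value of a *signed* integer
 * type of the given size, e.g. INT_TYPE_MAX(int64_t) == INT64_MAX.  It is
 * used below to clamp timespec fields that may be 32-bit.
 */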
static uint64_t
gettime_ns(void)
{
   struct timespec current;
   clock_gettime(CLOCK_MONOTONIC, &current);
   return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
}

static uint64_t anv_get_absolute_timeout(uint64_t timeout)
{
   if (timeout == 0)
      return 0;
   uint64_t current_time = gettime_ns();
   uint64_t max_timeout = (uint64_t) INT64_MAX - current_time;

   timeout = MIN2(max_timeout, timeout);

   return (current_time + timeout);
}

static int64_t anv_get_relative_timeout(uint64_t abs_timeout)
{
   uint64_t now = gettime_ns();

   if (abs_timeout < now)
      return 0;
   return abs_timeout - now;
}

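/* Wait on a set of fences that are all backed by DRM syncobjs.  Per the
 * external fence rules, a temporary payload, if any, overrides the
 * permanent one.
 */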
static VkResult
anv_wait_for_syncobj_fences(struct anv_device *device,
                            uint32_t fenceCount,
                            const VkFence *pFences,
                            bool waitAll,
                            uint64_t abs_timeout_ns)
{
   uint32_t *syncobjs = vk_zalloc(&device->alloc,
                                  sizeof(*syncobjs) * fenceCount, 8,
                                  VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!syncobjs)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      assert(fence->permanent.type == ANV_FENCE_TYPE_SYNCOBJ);

      struct anv_fence_impl *impl =
         fence->temporary.type != ANV_FENCE_TYPE_NONE ?
         &fence->temporary : &fence->permanent;

      assert(impl->type == ANV_FENCE_TYPE_SYNCOBJ);
      syncobjs[i] = impl->syncobj;
   }

   /* The gem_syncobj_wait ioctl may return early due to an inherent
    * limitation in the way it computes timeouts.  Loop until we've actually
    * passed the timeout.
    */
   int ret;
   do {
      ret = anv_gem_syncobj_wait(device, syncobjs, fenceCount,
                                 abs_timeout_ns, waitAll);
   } while (ret == -1 && errno == ETIME && gettime_ns() < abs_timeout_ns);

   vk_free(&device->alloc, syncobjs);

   if (ret == -1) {
      if (errno == ETIME) {
         return VK_TIMEOUT;
      } else {
         /* We don't know the real error. */
         return anv_device_set_lost(device, "drm_syncobj_wait failed: %m");
      }
   } else {
      return VK_SUCCESS;
   }
}

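/* Wait on a set of BO-backed fences.  Submitted fences are waited on with
 * the GEM wait ioctl; fences that have not been submitted yet are handled
 * by sleeping on the queue_submit condition variable until vkQueueSubmit
 * signals it, then re-checking.
 */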
static VkResult
anv_wait_for_bo_fences(struct anv_device *device,
                       uint32_t fenceCount,
                       const VkFence *pFences,
                       bool waitAll,
                       uint64_t _timeout)
{
   /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is supposed
    * to block indefinitely for timeouts <= 0.  Unfortunately, this was
    * broken for a couple of kernel releases.  Since there's no way to know
    * whether or not the kernel we're using is one of the broken ones, the
    * best we can do is to clamp the timeout to INT64_MAX.  This limits the
    * maximum timeout from 584 years to 292 years - likely not a big deal.
    */
   int64_t timeout = MIN2(_timeout, (uint64_t) INT64_MAX);

   VkResult result = VK_SUCCESS;
   uint32_t pending_fences = fenceCount;
   while (pending_fences) {
      pending_fences = 0;
      bool signaled_fences = false;
      for (uint32_t i = 0; i < fenceCount; i++) {
         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);

         /* This function assumes that all fences are BO fences and that
          * they have no temporary state.  Since BO fences will never be
          * exported, this should be a safe assumption.
          */
         assert(fence->permanent.type == ANV_FENCE_TYPE_BO);
         assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
         struct anv_fence_impl *impl = &fence->permanent;

         switch (impl->bo.state) {
         case ANV_BO_FENCE_STATE_RESET:
            /* This fence hasn't been submitted yet, we'll catch it the
             * next time around.  Yes, this may mean we dead-loop but,
             * short of lots of locking and a condition variable, there's
             * not much that we can do about that.
             */
            pending_fences++;
            continue;

         case ANV_BO_FENCE_STATE_SIGNALED:
            /* This fence is not pending.  If waitAll isn't set, we can
             * return early.  Otherwise, we have to keep going.
             */
            if (!waitAll) {
               result = VK_SUCCESS;
               goto done;
            }
            continue;

         case ANV_BO_FENCE_STATE_SUBMITTED:
            /* These are the fences we really care about.  Go ahead and
             * wait on it until we hit a timeout.
             */
            result = anv_device_wait(device, &impl->bo.bo, timeout);
            switch (result) {
            case VK_SUCCESS:
               impl->bo.state = ANV_BO_FENCE_STATE_SIGNALED;
               signaled_fences = true;
               if (!waitAll)
                  goto done;
               break;

            case VK_TIMEOUT:
               goto done;

            default:
               return result;
            }
         }
      }

      if (pending_fences && !signaled_fences) {
         /* If we've hit this then someone decided to vkWaitForFences before
          * they've actually submitted any of them to a queue.  This is a
          * fairly pessimal case, so it's ok to lock here and use a standard
          * pthreads condition variable.
          */
         pthread_mutex_lock(&device->mutex);

         /* It's possible that some of the fences have changed state since
          * the last time we checked.  Now that we have the lock, check for
          * pending fences again and don't wait if it's changed.
          */
         uint32_t now_pending_fences = 0;
         for (uint32_t i = 0; i < fenceCount; i++) {
            ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
            if (fence->permanent.bo.state == ANV_BO_FENCE_STATE_RESET)
               now_pending_fences++;
         }
         assert(now_pending_fences <= pending_fences);

         if (now_pending_fences == pending_fences) {
            struct timespec before;
            clock_gettime(CLOCK_MONOTONIC, &before);

            uint32_t abs_nsec = before.tv_nsec + timeout % NSEC_PER_SEC;
            uint64_t abs_sec = before.tv_sec + (abs_nsec / NSEC_PER_SEC) +
                               (timeout / NSEC_PER_SEC);
            abs_nsec %= NSEC_PER_SEC;

            /* Avoid roll-over in tv_sec on 32-bit systems if the user
             * provided timeout is UINT64_MAX
             */
            struct timespec abstime;
            abstime.tv_nsec = abs_nsec;
            abstime.tv_sec = MIN2(abs_sec, INT_TYPE_MAX(abstime.tv_sec));

            MAYBE_UNUSED int ret;
            ret = pthread_cond_timedwait(&device->queue_submit,
                                         &device->mutex, &abstime);
            assert(ret != EINVAL);

            struct timespec after;
            clock_gettime(CLOCK_MONOTONIC, &after);
            uint64_t time_elapsed =
               ((uint64_t)after.tv_sec * NSEC_PER_SEC + after.tv_nsec) -
               ((uint64_t)before.tv_sec * NSEC_PER_SEC + before.tv_nsec);

            if (time_elapsed >= timeout) {
               pthread_mutex_unlock(&device->mutex);
               result = VK_TIMEOUT;
               goto done;
            }

            timeout -= time_elapsed;
         }

         pthread_mutex_unlock(&device->mutex);
      }
   }

done:
   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   return result;
}

static VkResult
anv_wait_for_wsi_fence(struct anv_device *device,
                       const VkFence _fence,
                       uint64_t abs_timeout)
{
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   struct anv_fence_impl *impl = &fence->permanent;

   return impl->fence_wsi->wait(impl->fence_wsi, abs_timeout);
}

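/* Generic wait used when the fence list mixes implementations.  With
 * waitAll set (or a single fence) we simply wait on each fence in turn
 * with its own mechanism.  The "wait any" case has no common kernel
 * primitive to lean on, so we busy-poll each fence with a zero timeout
 * until one signals or the deadline passes.
 */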
static VkResult
anv_wait_for_fences(struct anv_device *device,
                    uint32_t fenceCount,
                    const VkFence *pFences,
                    bool waitAll,
                    uint64_t abs_timeout)
{
   VkResult result = VK_SUCCESS;

   if (fenceCount <= 1 || waitAll) {
      for (uint32_t i = 0; i < fenceCount; i++) {
         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
         switch (fence->permanent.type) {
         case ANV_FENCE_TYPE_BO:
            result = anv_wait_for_bo_fences(
               device, 1, &pFences[i], true,
               anv_get_relative_timeout(abs_timeout));
            break;
         case ANV_FENCE_TYPE_SYNCOBJ:
            result = anv_wait_for_syncobj_fences(device, 1, &pFences[i],
                                                 true, abs_timeout);
            break;
         case ANV_FENCE_TYPE_WSI:
            result = anv_wait_for_wsi_fence(device, pFences[i], abs_timeout);
            break;
         case ANV_FENCE_TYPE_NONE:
            result = VK_SUCCESS;
            break;
         }
         if (result != VK_SUCCESS)
            return result;
      }
   } else {
      do {
         for (uint32_t i = 0; i < fenceCount; i++) {
            if (anv_wait_for_fences(device, 1, &pFences[i], true, 0) == VK_SUCCESS)
               return VK_SUCCESS;
         }
      } while (gettime_ns() < abs_timeout);
      result = VK_TIMEOUT;
   }
   return result;
}

static bool anv_all_fences_syncobj(uint32_t fenceCount, const VkFence *pFences)
{
   for (uint32_t i = 0; i < fenceCount; ++i) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      if (fence->permanent.type != ANV_FENCE_TYPE_SYNCOBJ)
         return false;
   }
   return true;
}

static bool anv_all_fences_bo(uint32_t fenceCount, const VkFence *pFences)
{
   for (uint32_t i = 0; i < fenceCount; ++i) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      if (fence->permanent.type != ANV_FENCE_TYPE_BO)
         return false;
   }
   return true;
}

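/* Dispatch to the fastest wait path available: a single syncobj-wait ioctl
 * when every fence is a syncobj, the BO polling loop when every fence is
 * BO-backed, and the generic per-fence path for mixed lists.
 */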
VkResult anv_WaitForFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences,
    VkBool32                                    waitAll,
    uint64_t                                    timeout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   if (anv_all_fences_syncobj(fenceCount, pFences)) {
      return anv_wait_for_syncobj_fences(device, fenceCount, pFences,
                                         waitAll,
                                         anv_get_absolute_timeout(timeout));
   } else if (anv_all_fences_bo(fenceCount, pFences)) {
      return anv_wait_for_bo_fences(device, fenceCount, pFences,
                                    waitAll, timeout);
   } else {
      return anv_wait_for_fences(device, fenceCount, pFences,
                                 waitAll, anv_get_absolute_timeout(timeout));
   }
}

void anv_GetPhysicalDeviceExternalFenceProperties(
    VkPhysicalDevice                            physicalDevice,
    const VkPhysicalDeviceExternalFenceInfoKHR* pExternalFenceInfo,
    VkExternalFencePropertiesKHR*               pExternalFenceProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   switch (pExternalFenceInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
      if (device->has_syncobj_wait) {
         pExternalFenceProperties->exportFromImportedHandleTypes =
            VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT |
            VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalFenceProperties->compatibleHandleTypes =
            VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT |
            VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalFenceProperties->externalFenceFeatures =
            VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT;
         return;
      }
      break;

   default:
      break;
   }

   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

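/* Import a syncobj or sync-file fd into a fence.  Opaque fds map directly
 * to a syncobj; sync files are imported into a freshly created syncobj so
 * the syncobj-based WaitForFences path keeps working.  Per the spec, a
 * successful import transfers fd ownership to the driver.
 */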
VkResult anv_ImportFenceFdKHR(
    VkDevice                                    _device,
    const VkImportFenceFdInfoKHR*               pImportFenceFdInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, pImportFenceFdInfo->fence);
   int fd = pImportFenceFdInfo->fd;

   assert(pImportFenceFdInfo->sType ==
          VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR);

   struct anv_fence_impl new_impl = {
      .type = ANV_FENCE_TYPE_NONE,
   };

   switch (pImportFenceFdInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
      new_impl.type = ANV_FENCE_TYPE_SYNCOBJ;

      new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
      if (!new_impl.syncobj)
         return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);

      break;

   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
      /* Sync files are a bit tricky.  Because we want to continue using the
       * syncobj implementation of WaitForFences, we don't use the sync file
       * directly but instead import it into a syncobj.
       */
      new_impl.type = ANV_FENCE_TYPE_SYNCOBJ;

      new_impl.syncobj = anv_gem_syncobj_create(device, 0);
      if (!new_impl.syncobj)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      if (anv_gem_syncobj_import_sync_file(device, new_impl.syncobj, fd)) {
         anv_gem_syncobj_destroy(device, new_impl.syncobj);
         return vk_errorf(device->instance, NULL,
                          VK_ERROR_INVALID_EXTERNAL_HANDLE,
                          "syncobj sync file import failed: %m");
      }
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Importing a fence payload from a file descriptor transfers
    *    ownership of the file descriptor from the application to the
    *    Vulkan implementation. The application must not perform any
    *    operations on the file descriptor after a successful import."
    *
    * If the import fails, we leave the file descriptor open.
    */
   close(fd);

   if (pImportFenceFdInfo->flags & VK_FENCE_IMPORT_TEMPORARY_BIT) {
      anv_fence_impl_cleanup(device, &fence->temporary);
      fence->temporary = new_impl;
   } else {
      anv_fence_impl_cleanup(device, &fence->permanent);
      fence->permanent = new_impl;
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceFdKHR(
    VkDevice                                    _device,
    const VkFenceGetFdInfoKHR*                  pGetFdInfo,
    int*                                        pFd)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, pGetFdInfo->fence);

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR);

   struct anv_fence_impl *impl =
      fence->temporary.type != ANV_FENCE_TYPE_NONE ?
      &fence->temporary : &fence->permanent;

   assert(impl->type == ANV_FENCE_TYPE_SYNCOBJ);
   switch (pGetFdInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT: {
      int fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = fd;
      break;
   }

   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT: {
      int fd = anv_gem_syncobj_export_sync_file(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = fd;
      break;
   }

   default:
      unreachable("Invalid fence export handle type");
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Export operations have the same transference as the specified
    *    handle type’s import operations. [...] If the fence was using a
    *    temporarily imported payload, the fence’s prior permanent payload
    *    will be restored."
    */
   if (impl == &fence->temporary)
      anv_fence_impl_cleanup(device, impl);

   return VK_SUCCESS;
}

// Queue semaphore functions

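/* Semaphores come in several backing flavors: a no-op dummy when the
 * semaphore is never exported (execbuf on a single ring is already
 * ordered), a DRM syncobj or a 4 KiB BO for opaque-fd export depending on
 * kernel support, and a sync-file fd for SYNC_FD export.
 */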
VkResult anv_CreateSemaphore(
    VkDevice                                    _device,
    const VkSemaphoreCreateInfo*                pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkSemaphore*                                pSemaphore)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_semaphore *semaphore;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO);

   semaphore = vk_alloc2(&device->alloc, pAllocator, sizeof(*semaphore), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (semaphore == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   const VkExportSemaphoreCreateInfoKHR *export =
      vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO);
   VkExternalSemaphoreHandleTypeFlagsKHR handleTypes =
      export ? export->handleTypes : 0;

   if (handleTypes == 0) {
      /* The DRM execbuffer ioctl always executes in order so long as you
       * stay on the same ring.  Since we don't expose the blit engine as a
       * DMA queue, a dummy no-op semaphore is a perfectly valid
       * implementation.
       */
      semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DUMMY;
   } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) {
      assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT);
      if (device->instance->physicalDevice.has_syncobj) {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
         semaphore->permanent.syncobj = anv_gem_syncobj_create(device, 0);
         if (!semaphore->permanent.syncobj) {
            vk_free2(&device->alloc, pAllocator, semaphore);
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         }
      } else {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_BO;
         VkResult result = anv_bo_cache_alloc(device, &device->bo_cache,
                                              4096, 0,
                                              &semaphore->permanent.bo);
         if (result != VK_SUCCESS) {
            vk_free2(&device->alloc, pAllocator, semaphore);
            return result;
         }

         /* If we're going to use this as a fence, we need to *not* have the
          * EXEC_OBJECT_ASYNC bit set.
          */
         assert(!(semaphore->permanent.bo->flags & EXEC_OBJECT_ASYNC));
      }
   } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
      assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT);

      semaphore->permanent.type = ANV_SEMAPHORE_TYPE_SYNC_FILE;
      semaphore->permanent.fd = -1;
   } else {
      assert(!"Unknown handle type");
      vk_free2(&device->alloc, pAllocator, semaphore);
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   semaphore->temporary.type = ANV_SEMAPHORE_TYPE_NONE;

   *pSemaphore = anv_semaphore_to_handle(semaphore);

   return VK_SUCCESS;
}

static void
anv_semaphore_impl_cleanup(struct anv_device *device,
                           struct anv_semaphore_impl *impl)
{
   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_NONE:
   case ANV_SEMAPHORE_TYPE_DUMMY:
      /* Dummy.  Nothing to do */
      break;

   case ANV_SEMAPHORE_TYPE_BO:
      anv_bo_cache_release(device, &device->bo_cache, impl->bo);
      break;

   case ANV_SEMAPHORE_TYPE_SYNC_FILE:
      close(impl->fd);
      break;

   case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
      anv_gem_syncobj_destroy(device, impl->syncobj);
      break;

   default:
      unreachable("Invalid semaphore type");
   }

   impl->type = ANV_SEMAPHORE_TYPE_NONE;
}

void
anv_semaphore_reset_temporary(struct anv_device *device,
                              struct anv_semaphore *semaphore)
{
   if (semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE)
      return;

   anv_semaphore_impl_cleanup(device, &semaphore->temporary);
}

void anv_DestroySemaphore(
    VkDevice                                    _device,
    VkSemaphore                                 _semaphore,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, _semaphore);

   if (semaphore == NULL)
      return;

   anv_semaphore_impl_cleanup(device, &semaphore->temporary);
   anv_semaphore_impl_cleanup(device, &semaphore->permanent);

   vk_free2(&device->alloc, pAllocator, semaphore);
}

void anv_GetPhysicalDeviceExternalSemaphoreProperties(
    VkPhysicalDevice                            physicalDevice,
    const VkPhysicalDeviceExternalSemaphoreInfoKHR* pExternalSemaphoreInfo,
    VkExternalSemaphorePropertiesKHR*           pExternalSemaphoreProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   switch (pExternalSemaphoreInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
      pExternalSemaphoreProperties->exportFromImportedHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
      pExternalSemaphoreProperties->compatibleHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
      pExternalSemaphoreProperties->externalSemaphoreFeatures =
         VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
         VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
      return;

   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
      if (device->has_exec_fence) {
         pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
         pExternalSemaphoreProperties->compatibleHandleTypes =
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalSemaphoreProperties->externalSemaphoreFeatures =
            VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
         return;
      }
      break;

   default:
      break;
   }

   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

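/* As with fences, opaque-fd import maps to a syncobj (or a BO on kernels
 * without syncobj support), and ownership of the fd transfers to the
 * driver on success.  SYNC_FD import simply adopts the fd into the
 * payload.
 */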
VkResult anv_ImportSemaphoreFdKHR(
    VkDevice                                    _device,
    const VkImportSemaphoreFdInfoKHR*           pImportSemaphoreFdInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pImportSemaphoreFdInfo->semaphore);
   int fd = pImportSemaphoreFdInfo->fd;

   struct anv_semaphore_impl new_impl = {
      .type = ANV_SEMAPHORE_TYPE_NONE,
   };

   switch (pImportSemaphoreFdInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
      if (device->instance->physicalDevice.has_syncobj) {
         new_impl.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;

         new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
         if (!new_impl.syncobj)
            return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
      } else {
         new_impl.type = ANV_SEMAPHORE_TYPE_BO;

         VkResult result = anv_bo_cache_import(device, &device->bo_cache,
                                               fd, 0, &new_impl.bo);
         if (result != VK_SUCCESS)
            return result;

         if (new_impl.bo->size < 4096) {
            anv_bo_cache_release(device, &device->bo_cache, new_impl.bo);
            return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
         }

         /* If we're going to use this as a fence, we need to *not* have the
          * EXEC_OBJECT_ASYNC bit set.
          */
         assert(!(new_impl.bo->flags & EXEC_OBJECT_ASYNC));
      }

      /* From the Vulkan spec:
       *
       *    "Importing semaphore state from a file descriptor transfers
       *    ownership of the file descriptor from the application to the
       *    Vulkan implementation. The application must not perform any
       *    operations on the file descriptor after a successful import."
       *
       * If the import fails, we leave the file descriptor open.
       */
      close(fd);
      break;

   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
      new_impl = (struct anv_semaphore_impl) {
         .type = ANV_SEMAPHORE_TYPE_SYNC_FILE,
         .fd = fd,
      };
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
      anv_semaphore_impl_cleanup(device, &semaphore->temporary);
      semaphore->temporary = new_impl;
   } else {
      anv_semaphore_impl_cleanup(device, &semaphore->permanent);
      semaphore->permanent = new_impl;
   }

   return VK_SUCCESS;
}

VkResult anv_GetSemaphoreFdKHR(
    VkDevice                                    _device,
    const VkSemaphoreGetFdInfoKHR*              pGetFdInfo,
    int*                                        pFd)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pGetFdInfo->semaphore);
   VkResult result;
   int fd;

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR);

   struct anv_semaphore_impl *impl =
      semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
      &semaphore->temporary : &semaphore->permanent;

   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_BO:
      result = anv_bo_cache_export(device, &device->bo_cache, impl->bo, pFd);
      if (result != VK_SUCCESS)
         return result;
      break;

   case ANV_SEMAPHORE_TYPE_SYNC_FILE:
      /* There are two reasons why this could happen:
       *
       * 1) The user is trying to export without submitting something that
       *    signals the semaphore.  If this is the case, it's their bug so
       *    what we return here doesn't matter.
       *
       * 2) The kernel didn't give us a file descriptor.  The most likely
       *    reason for this is running out of file descriptors.
       */
      if (impl->fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = impl->fd;

      /* From the Vulkan 1.0.53 spec:
       *
       *    "...exporting a semaphore payload to a handle with copy
       *    transference has the same side effects on the source
       *    semaphore’s payload as executing a semaphore wait operation."
       *
       * In other words, it may still be a SYNC_FD semaphore, but it's now
       * considered to have been waited on and no longer has a sync file
       * attached.
       */
      impl->fd = -1;
      return VK_SUCCESS;

   case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
      fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
      *pFd = fd;
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Export operations have the same transference as the specified
    *    handle type’s import operations. [...] If the semaphore was using
    *    a temporarily imported payload, the semaphore’s prior permanent
    *    payload will be restored."
    */
   if (impl == &semaphore->temporary)
      anv_semaphore_impl_cleanup(device, impl);

   return VK_SUCCESS;
}