src/intel/vulkan/anv_queue.c (mesa.git @ cf063701b8d0)
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * This file implements VkQueue, VkFence, and VkSemaphore
 */

#include <fcntl.h>
#include <unistd.h>
#include <sys/eventfd.h>

#include "anv_private.h"
#include "vk_util.h"

#include "genxml/gen7_pack.h"

VkResult
anv_device_execbuf(struct anv_device *device,
                   struct drm_i915_gem_execbuffer2 *execbuf,
                   struct anv_bo **execbuf_bos)
{
   int ret = device->no_hw ? 0 : anv_gem_execbuffer(device, execbuf);
   if (ret != 0) {
      /* We don't know the real error. */
      device->lost = true;
      return vk_errorf(device->instance, device, VK_ERROR_DEVICE_LOST,
                       "execbuf2 failed: %m");
   }

   struct drm_i915_gem_exec_object2 *objects =
      (void *)(uintptr_t)execbuf->buffers_ptr;
   for (uint32_t k = 0; k < execbuf->buffer_count; k++) {
      if (execbuf_bos[k]->flags & EXEC_OBJECT_PINNED)
         assert(execbuf_bos[k]->offset == objects[k].offset);
      execbuf_bos[k]->offset = objects[k].offset;
   }

   return VK_SUCCESS;
}
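
/* For reference, anv_gem_execbuffer() (defined elsewhere in the driver) is
 * essentially a thin wrapper around the execbuffer2 ioctl.  A minimal
 * sketch, assuming the usual libdrm entry point rather than the driver's
 * actual implementation:
 *
 *    #include <xf86drm.h>
 *    #include <i915_drm.h>
 *
 *    static int
 *    example_gem_execbuffer(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
 *    {
 *       // drmIoctl() transparently retries on EINTR/EAGAIN.
 *       return drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
 *    }
 */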

VkResult
anv_device_submit_simple_batch(struct anv_device *device,
                               struct anv_batch *batch)
{
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   struct anv_bo bo, *exec_bos[1];
   VkResult result = VK_SUCCESS;
   uint32_t size;

   /* The kernel requires the batch length to be 8-byte aligned. */
   size = align_u32(batch->next - batch->start, 8);
   result = anv_bo_pool_alloc(&device->batch_bo_pool, &bo, size);
   if (result != VK_SUCCESS)
      return result;

   memcpy(bo.map, batch->start, size);
   if (!device->info.has_llc)
      gen_flush_range(bo.map, size);

   exec_bos[0] = &bo;
   exec2_objects[0].handle = bo.gem_handle;
   exec2_objects[0].relocation_count = 0;
   exec2_objects[0].relocs_ptr = 0;
   exec2_objects[0].alignment = 0;
   exec2_objects[0].offset = bo.offset;
   exec2_objects[0].flags = bo.flags;
   exec2_objects[0].rsvd1 = 0;
   exec2_objects[0].rsvd2 = 0;

   execbuf.buffers_ptr = (uintptr_t) exec2_objects;
   execbuf.buffer_count = 1;
   execbuf.batch_start_offset = 0;
   execbuf.batch_len = size;
   execbuf.cliprects_ptr = 0;
   execbuf.num_cliprects = 0;
   execbuf.DR1 = 0;
   execbuf.DR4 = 0;

   execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   execbuf.rsvd1 = device->context_id;
   execbuf.rsvd2 = 0;

   result = anv_device_execbuf(device, &execbuf, exec_bos);
   if (result != VK_SUCCESS)
      goto fail;

   result = anv_device_wait(device, &bo, INT64_MAX);

 fail:
   anv_bo_pool_free(&device->batch_bo_pool, &bo);

   return result;
}
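
/* Usage sketch (illustrative, not part of this file): a caller builds a
 * tiny batch that ends in MI_BATCH_BUFFER_END and hands it to the helper
 * above.  Assuming an anv_batch backed by a small local buffer and the
 * gen7 pack macros included at the top of this file:
 *
 *    uint32_t cmds[16];
 *    struct anv_batch batch = {
 *       .start = cmds,
 *       .next  = cmds,
 *       .end   = cmds + ARRAY_SIZE(cmds),
 *    };
 *    anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
 *    VkResult result = anv_device_submit_simple_batch(device, &batch);
 */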

VkResult anv_QueueSubmit(
    VkQueue                                     _queue,
    uint32_t                                    submitCount,
    const VkSubmitInfo*                         pSubmits,
    VkFence                                     fence)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);
   struct anv_device *device = queue->device;

   /* Query for device status prior to submitting.  Technically, we don't
    * need to do this.  However, if we have a client that's submitting piles
    * of garbage, we would rather break as early as possible to keep the
    * damage from a GPU hang contained.  If we don't check here, we'll
    * either be waiting for the kernel to kick us or we'll have to wait
    * until the client waits on a fence before we actually know whether or
    * not we've hung.
    */
   VkResult result = anv_device_query_status(device);
   if (result != VK_SUCCESS)
      return result;

   /* We lock around QueueSubmit for three main reasons:
    *
    *  1) When a block pool is resized, we create a new gem handle with a
    *     different size and, in the case of surface states, possibly a
    *     different center offset but we re-use the same anv_bo struct when
    *     we do so.  If this happens in the middle of setting up an execbuf,
    *     we could end up with our list of BOs out of sync with our list of
    *     gem handles.
    *
    *  2) The algorithm we use for building the list of unique buffers isn't
    *     thread-safe.  While the client is supposed to synchronize around
    *     QueueSubmit, this would be extremely difficult to debug if it ever
    *     came up in the wild due to a broken app.  It's better to play it
    *     safe and just lock around QueueSubmit.
    *
    *  3) The anv_cmd_buffer_execbuf function may perform relocations in
    *     userspace.  Due to the fact that the surface state buffer is
    *     shared between batches, we can't afford to have that happen from
    *     multiple threads at the same time.  Even though the user is
    *     supposed to ensure this doesn't happen, we play it safe as in (2)
    *     above.
    *
    * Since the only other things that ever take the device lock, such as
    * block pool resizes, happen rarely, the lock will almost never be
    * contended, so taking it isn't really an expensive operation here.
    */
   pthread_mutex_lock(&device->mutex);

   if (fence && submitCount == 0) {
      /* If we don't have any command buffers, we need to submit a dummy
       * batch to give GEM something to wait on.  We could, potentially,
       * come up with something more efficient but this shouldn't be a
       * common case.
       */
      result = anv_cmd_buffer_execbuf(device, NULL, NULL, 0, NULL, 0, fence);
      goto out;
   }

   for (uint32_t i = 0; i < submitCount; i++) {
      /* Fence for this submit.  NULL for all but the last one */
      VkFence submit_fence = (i == submitCount - 1) ? fence : VK_NULL_HANDLE;

      if (pSubmits[i].commandBufferCount == 0) {
         /* If we don't have any command buffers, we need to submit a dummy
          * batch to give GEM something to wait on.  We could, potentially,
          * come up with something more efficient but this shouldn't be a
          * common case.
          */
         result = anv_cmd_buffer_execbuf(device, NULL,
                                         pSubmits[i].pWaitSemaphores,
                                         pSubmits[i].waitSemaphoreCount,
                                         pSubmits[i].pSignalSemaphores,
                                         pSubmits[i].signalSemaphoreCount,
                                         submit_fence);
         if (result != VK_SUCCESS)
            goto out;

         continue;
      }

      for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
         ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer,
                         pSubmits[i].pCommandBuffers[j]);
         assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
         assert(!anv_batch_has_error(&cmd_buffer->batch));

         /* Fence for this execbuf.  NULL for all but the last one */
         VkFence execbuf_fence =
            (j == pSubmits[i].commandBufferCount - 1) ?
            submit_fence : VK_NULL_HANDLE;

         const VkSemaphore *in_semaphores = NULL, *out_semaphores = NULL;
         uint32_t num_in_semaphores = 0, num_out_semaphores = 0;
         if (j == 0) {
            /* Only the first batch gets the in semaphores */
            in_semaphores = pSubmits[i].pWaitSemaphores;
            num_in_semaphores = pSubmits[i].waitSemaphoreCount;
         }

         if (j == pSubmits[i].commandBufferCount - 1) {
            /* Only the last batch gets the out semaphores */
            out_semaphores = pSubmits[i].pSignalSemaphores;
            num_out_semaphores = pSubmits[i].signalSemaphoreCount;
         }

         result = anv_cmd_buffer_execbuf(device, cmd_buffer,
                                         in_semaphores, num_in_semaphores,
                                         out_semaphores, num_out_semaphores,
                                         execbuf_fence);
         if (result != VK_SUCCESS)
            goto out;
      }
   }

   pthread_cond_broadcast(&device->queue_submit);

 out:
   if (result != VK_SUCCESS) {
      /* In the case that something has gone wrong we may end up with an
       * inconsistent state from which it may not be trivial to recover.
       * For example, we might have computed address relocations, and any
       * future attempt to re-submit this job would need to know about that
       * and avoid computing relocation addresses again.
       *
       * To avoid this sort of issue, we assume that if something went
       * wrong during submission we must already be in a really bad
       * situation anyway (such as being out of memory) and return
       * VK_ERROR_DEVICE_LOST to ensure that clients do not attempt to
       * submit the same job again to this device.
       */
      result = vk_errorf(device->instance, device, VK_ERROR_DEVICE_LOST,
                         "vkQueueSubmit() failed");
      device->lost = true;
   }

   pthread_mutex_unlock(&device->mutex);

   return result;
}
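
/* Client-side view (illustrative only): the kind of call this entry point
 * services, with one wait semaphore, one command buffer, and one signal
 * semaphore.  The handle names are hypothetical:
 *
 *    VkPipelineStageFlags wait_stage =
 *       VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
 *    VkSubmitInfo submit = {
 *       .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
 *       .waitSemaphoreCount = 1,
 *       .pWaitSemaphores = &acquire_semaphore,
 *       .pWaitDstStageMask = &wait_stage,
 *       .commandBufferCount = 1,
 *       .pCommandBuffers = &cmd_buffer,
 *       .signalSemaphoreCount = 1,
 *       .pSignalSemaphores = &render_done_semaphore,
 *    };
 *    vkQueueSubmit(queue, 1, &submit, fence);
 */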

VkResult anv_QueueWaitIdle(
    VkQueue                                     _queue)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);

   return anv_DeviceWaitIdle(anv_device_to_handle(queue->device));
}

VkResult anv_CreateFence(
    VkDevice                                    _device,
    const VkFenceCreateInfo*                    pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkFence*                                    pFence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_fence *fence;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);

   fence = vk_zalloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (fence == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (device->instance->physicalDevice.has_syncobj_wait) {
      fence->permanent.type = ANV_FENCE_TYPE_SYNCOBJ;

      uint32_t create_flags = 0;
      if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT)
         create_flags |= DRM_SYNCOBJ_CREATE_SIGNALED;

      fence->permanent.syncobj = anv_gem_syncobj_create(device, create_flags);
      if (!fence->permanent.syncobj)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   } else {
      fence->permanent.type = ANV_FENCE_TYPE_BO;

      VkResult result = anv_bo_pool_alloc(&device->batch_bo_pool,
                                          &fence->permanent.bo.bo, 4096);
      if (result != VK_SUCCESS)
         return result;

      if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
         fence->permanent.bo.state = ANV_BO_FENCE_STATE_SIGNALED;
      } else {
         fence->permanent.bo.state = ANV_BO_FENCE_STATE_RESET;
      }
   }

   *pFence = anv_fence_to_handle(fence);

   return VK_SUCCESS;
}
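
/* For reference: on the syncobj path the fence payload is just a DRM
 * syncobj handle.  A sketch of what anv_gem_syncobj_create() boils down to,
 * using the raw ioctl rather than the driver's wrapper (illustrative only):
 *
 *    static uint32_t
 *    example_syncobj_create(int fd, uint32_t flags)
 *    {
 *       struct drm_syncobj_create args = { .flags = flags };
 *       if (drmIoctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &args))
 *          return 0;   // 0 is never a valid syncobj handle
 *       return args.handle;
 *    }
 */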

static void
anv_fence_impl_cleanup(struct anv_device *device,
                       struct anv_fence_impl *impl)
{
   switch (impl->type) {
   case ANV_FENCE_TYPE_NONE:
      /* Dummy.  Nothing to do */
      break;

   case ANV_FENCE_TYPE_BO:
      anv_bo_pool_free(&device->batch_bo_pool, &impl->bo.bo);
      break;

   case ANV_FENCE_TYPE_SYNCOBJ:
      anv_gem_syncobj_destroy(device, impl->syncobj);
      break;

   case ANV_FENCE_TYPE_WSI:
      impl->fence_wsi->destroy(impl->fence_wsi);
      break;

   default:
      unreachable("Invalid fence type");
   }

   impl->type = ANV_FENCE_TYPE_NONE;
}

void anv_DestroyFence(
    VkDevice                                    _device,
    VkFence                                     _fence,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (!fence)
      return;

   anv_fence_impl_cleanup(device, &fence->temporary);
   anv_fence_impl_cleanup(device, &fence->permanent);

   vk_free2(&device->alloc, pAllocator, fence);
}

VkResult anv_ResetFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);

      /* From the Vulkan 1.0.53 spec:
       *
       *    "If any member of pFences currently has its payload imported
       *    with temporary permanence, that fence’s prior permanent payload
       *    is first restored.  The remaining operations described therefore
       *    operate on the restored payload."
       */
      if (fence->temporary.type != ANV_FENCE_TYPE_NONE)
         anv_fence_impl_cleanup(device, &fence->temporary);

      struct anv_fence_impl *impl = &fence->permanent;

      switch (impl->type) {
      case ANV_FENCE_TYPE_BO:
         impl->bo.state = ANV_BO_FENCE_STATE_RESET;
         break;

      case ANV_FENCE_TYPE_SYNCOBJ:
         anv_gem_syncobj_reset(device, impl->syncobj);
         break;

      default:
         unreachable("Invalid fence type");
      }
   }

   return VK_SUCCESS;
}
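
/* anv_gem_syncobj_reset() maps onto DRM_IOCTL_SYNCOBJ_RESET, which takes an
 * array of handles.  A minimal sketch with the raw ioctl (illustrative
 * only):
 *
 *    struct drm_syncobj_array args = {
 *       .handles = (uintptr_t)&syncobj,
 *       .count_handles = 1,
 *    };
 *    drmIoctl(fd, DRM_IOCTL_SYNCOBJ_RESET, &args);
 */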

VkResult anv_GetFenceStatus(
    VkDevice                                    _device,
    VkFence                                     _fence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (unlikely(device->lost))
      return VK_ERROR_DEVICE_LOST;

   struct anv_fence_impl *impl =
      fence->temporary.type != ANV_FENCE_TYPE_NONE ?
      &fence->temporary : &fence->permanent;

   switch (impl->type) {
   case ANV_FENCE_TYPE_BO:
      /* BO fences don't support import/export */
      assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
      switch (impl->bo.state) {
      case ANV_BO_FENCE_STATE_RESET:
         /* If it hasn't even been sent off to the GPU yet, it's not ready */
         return VK_NOT_READY;

      case ANV_BO_FENCE_STATE_SIGNALED:
         /* It's been signaled, return success */
         return VK_SUCCESS;

      case ANV_BO_FENCE_STATE_SUBMITTED: {
         VkResult result = anv_device_bo_busy(device, &impl->bo.bo);
         if (result == VK_SUCCESS) {
            impl->bo.state = ANV_BO_FENCE_STATE_SIGNALED;
            return VK_SUCCESS;
         } else {
            return result;
         }
      }
      default:
         unreachable("Invalid fence status");
      }

   case ANV_FENCE_TYPE_SYNCOBJ: {
      int ret = anv_gem_syncobj_wait(device, &impl->syncobj, 1, 0, true);
      if (ret == -1) {
         if (errno == ETIME) {
            return VK_NOT_READY;
         } else {
            /* We don't know the real error. */
            device->lost = true;
            return vk_errorf(device->instance, device, VK_ERROR_DEVICE_LOST,
                             "drm_syncobj_wait failed: %m");
         }
      } else {
         return VK_SUCCESS;
      }
   }

   default:
      unreachable("Invalid fence type");
   }
}

#define NSEC_PER_SEC 1000000000
#define INT_TYPE_MAX(type) ((1ull << (sizeof(type) * 8 - 1)) - 1)
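
/* INT_TYPE_MAX(type) computes the maximum value of a *signed* integer type
 * from its size alone.  Worked example for a 64-bit tv_sec:
 *
 *    (1ull << (8 * 8 - 1)) - 1 == 0x7fffffffffffffff == INT64_MAX
 *
 * It is used below to clamp timespec::tv_sec portably, since tv_sec may be
 * 32-bit on some systems.
 */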

static uint64_t
gettime_ns(void)
{
   struct timespec current;
   clock_gettime(CLOCK_MONOTONIC, &current);
   return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
}

static uint64_t anv_get_absolute_timeout(uint64_t timeout)
{
   if (timeout == 0)
      return 0;
   uint64_t current_time = gettime_ns();
   uint64_t max_timeout = (uint64_t) INT64_MAX - current_time;

   timeout = MIN2(max_timeout, timeout);

   return (current_time + timeout);
}

static int64_t anv_get_relative_timeout(uint64_t abs_timeout)
{
   uint64_t now = gettime_ns();

   if (abs_timeout < now)
      return 0;
   return abs_timeout - now;
}
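
/* Worked example (illustrative): if the client passes timeout == UINT64_MAX
 * to vkWaitForFences(), anv_get_absolute_timeout() clamps
 * current_time + timeout to INT64_MAX instead of letting the addition wrap,
 * so the absolute deadline stays representable in the kernel's signed
 * 64-bit nanosecond timeouts.  anv_get_relative_timeout() then recovers a
 * relative wait of INT64_MAX - now, i.e. effectively "forever".
 */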

static VkResult
anv_wait_for_syncobj_fences(struct anv_device *device,
                            uint32_t fenceCount,
                            const VkFence *pFences,
                            bool waitAll,
                            uint64_t abs_timeout_ns)
{
   uint32_t *syncobjs = vk_zalloc(&device->alloc,
                                  sizeof(*syncobjs) * fenceCount, 8,
                                  VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!syncobjs)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      assert(fence->permanent.type == ANV_FENCE_TYPE_SYNCOBJ);

      struct anv_fence_impl *impl =
         fence->temporary.type != ANV_FENCE_TYPE_NONE ?
         &fence->temporary : &fence->permanent;

      assert(impl->type == ANV_FENCE_TYPE_SYNCOBJ);
      syncobjs[i] = impl->syncobj;
   }

   /* The gem_syncobj_wait ioctl may return early due to an inherent
    * limitation in the way it computes timeouts.  Loop until we've actually
    * passed the timeout.
    */
   int ret;
   do {
      ret = anv_gem_syncobj_wait(device, syncobjs, fenceCount,
                                 abs_timeout_ns, waitAll);
   } while (ret == -1 && errno == ETIME && gettime_ns() < abs_timeout_ns);

   vk_free(&device->alloc, syncobjs);

   if (ret == -1) {
      if (errno == ETIME) {
         return VK_TIMEOUT;
      } else {
         /* We don't know the real error. */
         device->lost = true;
         return vk_errorf(device->instance, device, VK_ERROR_DEVICE_LOST,
                          "drm_syncobj_wait failed: %m");
      }
   } else {
      return VK_SUCCESS;
   }
}
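
/* anv_gem_syncobj_wait() maps onto DRM_IOCTL_SYNCOBJ_WAIT, which takes an
 * absolute CLOCK_MONOTONIC deadline in nanoseconds.  A sketch with the raw
 * ioctl (illustrative only):
 *
 *    struct drm_syncobj_wait args = {
 *       .handles = (uintptr_t)handles,
 *       .timeout_nsec = abs_timeout_ns,
 *       .count_handles = num_handles,
 *       .flags = wait_all ? DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL : 0,
 *    };
 *    int ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
 */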

static VkResult
anv_wait_for_bo_fences(struct anv_device *device,
                       uint32_t fenceCount,
                       const VkFence *pFences,
                       bool waitAll,
                       uint64_t _timeout)
{
   /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is supposed
    * to block indefinitely for timeouts <= 0.  Unfortunately, this was
    * broken for a couple of kernel releases.  Since there's no way to know
    * whether or not the kernel we're using is one of the broken ones, the
    * best we can do is to clamp the timeout to INT64_MAX.  This limits the
    * maximum timeout from 584 years to 292 years - likely not a big deal.
    */
   int64_t timeout = MIN2(_timeout, (uint64_t) INT64_MAX);

   VkResult result = VK_SUCCESS;
   uint32_t pending_fences = fenceCount;
   while (pending_fences) {
      pending_fences = 0;
      bool signaled_fences = false;
      for (uint32_t i = 0; i < fenceCount; i++) {
         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);

         /* This function assumes that all fences are BO fences and that
          * they have no temporary state.  Since BO fences will never be
          * exported, this should be a safe assumption.
          */
         assert(fence->permanent.type == ANV_FENCE_TYPE_BO);
         assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
         struct anv_fence_impl *impl = &fence->permanent;

         switch (impl->bo.state) {
         case ANV_BO_FENCE_STATE_RESET:
            /* This fence hasn't been submitted yet, we'll catch it the
             * next time around.  Yes, this may mean we dead-loop but, short
             * of lots of locking and a condition variable, there's not much
             * that we can do about that.
             */
            pending_fences++;
            continue;

         case ANV_BO_FENCE_STATE_SIGNALED:
            /* This fence is not pending.  If waitAll isn't set, we can
             * return early.  Otherwise, we have to keep going.
             */
            if (!waitAll) {
               result = VK_SUCCESS;
               goto done;
            }
            continue;

         case ANV_BO_FENCE_STATE_SUBMITTED:
            /* These are the fences we really care about.  Go ahead and
             * wait on it until we hit a timeout.
             */
            result = anv_device_wait(device, &impl->bo.bo, timeout);
            switch (result) {
            case VK_SUCCESS:
               impl->bo.state = ANV_BO_FENCE_STATE_SIGNALED;
               signaled_fences = true;
               if (!waitAll)
                  goto done;
               break;

            case VK_TIMEOUT:
               goto done;

            default:
               return result;
            }
         }
      }

      if (pending_fences && !signaled_fences) {
         /* If we've hit this then someone decided to vkWaitForFences before
          * they've actually submitted any of them to a queue.  This is a
          * fairly pessimal case, so it's ok to lock here and use a standard
          * pthreads condition variable.
          */
         pthread_mutex_lock(&device->mutex);

         /* It's possible that some of the fences have changed state since
          * the last time we checked.  Now that we have the lock, check for
          * pending fences again and don't wait if it's changed.
          */
         uint32_t now_pending_fences = 0;
         for (uint32_t i = 0; i < fenceCount; i++) {
            ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
            if (fence->permanent.bo.state == ANV_BO_FENCE_STATE_RESET)
               now_pending_fences++;
         }
         assert(now_pending_fences <= pending_fences);

         if (now_pending_fences == pending_fences) {
            struct timespec before;
            clock_gettime(CLOCK_MONOTONIC, &before);

            uint32_t abs_nsec = before.tv_nsec + timeout % NSEC_PER_SEC;
            uint64_t abs_sec = before.tv_sec + (abs_nsec / NSEC_PER_SEC) +
                               (timeout / NSEC_PER_SEC);
            abs_nsec %= NSEC_PER_SEC;

            /* Avoid roll-over in tv_sec on 32-bit systems if the user
             * provided timeout is UINT64_MAX
             */
            struct timespec abstime;
            abstime.tv_nsec = abs_nsec;
            abstime.tv_sec = MIN2(abs_sec, INT_TYPE_MAX(abstime.tv_sec));

            MAYBE_UNUSED int ret;
            ret = pthread_cond_timedwait(&device->queue_submit,
                                         &device->mutex, &abstime);
            assert(ret != EINVAL);

            struct timespec after;
            clock_gettime(CLOCK_MONOTONIC, &after);
            uint64_t time_elapsed =
               ((uint64_t)after.tv_sec * NSEC_PER_SEC + after.tv_nsec) -
               ((uint64_t)before.tv_sec * NSEC_PER_SEC + before.tv_nsec);

            if (time_elapsed >= timeout) {
               pthread_mutex_unlock(&device->mutex);
               result = VK_TIMEOUT;
               goto done;
            }

            timeout -= time_elapsed;
         }

         pthread_mutex_unlock(&device->mutex);
      }
   }

 done:
   if (unlikely(device->lost))
      return VK_ERROR_DEVICE_LOST;

   return result;
}
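
/* anv_device_wait() ultimately issues DRM_IOCTL_I915_GEM_WAIT with a
 * relative timeout.  A sketch with the raw ioctl (illustrative only):
 *
 *    struct drm_i915_gem_wait wait = {
 *       .bo_handle = gem_handle,
 *       .timeout_ns = timeout_ns,   // signed; the clamp above keeps it >= 0
 *    };
 *    int ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 */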

static VkResult
anv_wait_for_wsi_fence(struct anv_device *device,
                       const VkFence _fence,
                       uint64_t abs_timeout)
{
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   struct anv_fence_impl *impl = &fence->permanent;

   return impl->fence_wsi->wait(impl->fence_wsi, abs_timeout);
}

static VkResult
anv_wait_for_fences(struct anv_device *device,
                    uint32_t fenceCount,
                    const VkFence *pFences,
                    bool waitAll,
                    uint64_t abs_timeout)
{
   VkResult result = VK_SUCCESS;

   if (fenceCount <= 1 || waitAll) {
      for (uint32_t i = 0; i < fenceCount; i++) {
         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
         switch (fence->permanent.type) {
         case ANV_FENCE_TYPE_BO:
            result = anv_wait_for_bo_fences(
               device, 1, &pFences[i], true,
               anv_get_relative_timeout(abs_timeout));
            break;
         case ANV_FENCE_TYPE_SYNCOBJ:
            result = anv_wait_for_syncobj_fences(device, 1, &pFences[i],
                                                 true, abs_timeout);
            break;
         case ANV_FENCE_TYPE_WSI:
            result = anv_wait_for_wsi_fence(device, pFences[i], abs_timeout);
            break;
         case ANV_FENCE_TYPE_NONE:
            result = VK_SUCCESS;
            break;
         }
         if (result != VK_SUCCESS)
            return result;
      }
   } else {
      do {
         for (uint32_t i = 0; i < fenceCount; i++) {
            if (anv_wait_for_fences(device, 1, &pFences[i], true, 0) == VK_SUCCESS)
               return VK_SUCCESS;
         }
      } while (gettime_ns() < abs_timeout);
      result = VK_TIMEOUT;
   }
   return result;
}

static bool anv_all_fences_syncobj(uint32_t fenceCount, const VkFence *pFences)
{
   for (uint32_t i = 0; i < fenceCount; ++i) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      if (fence->permanent.type != ANV_FENCE_TYPE_SYNCOBJ)
         return false;
   }
   return true;
}

static bool anv_all_fences_bo(uint32_t fenceCount, const VkFence *pFences)
{
   for (uint32_t i = 0; i < fenceCount; ++i) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      if (fence->permanent.type != ANV_FENCE_TYPE_BO)
         return false;
   }
   return true;
}

VkResult anv_WaitForFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences,
    VkBool32                                    waitAll,
    uint64_t                                    timeout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   if (unlikely(device->lost))
      return VK_ERROR_DEVICE_LOST;

   if (anv_all_fences_syncobj(fenceCount, pFences)) {
      return anv_wait_for_syncobj_fences(device, fenceCount, pFences,
                                         waitAll,
                                         anv_get_absolute_timeout(timeout));
   } else if (anv_all_fences_bo(fenceCount, pFences)) {
      return anv_wait_for_bo_fences(device, fenceCount, pFences,
                                    waitAll, timeout);
   } else {
      return anv_wait_for_fences(device, fenceCount, pFences,
                                 waitAll, anv_get_absolute_timeout(timeout));
   }
}
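
/* Client-side view (illustrative only): wait up to one second for all of
 * the fences to signal, then distinguish timeout from success:
 *
 *    VkResult res = vkWaitForFences(device, fence_count, fences,
 *                                   VK_TRUE, 1000ull * 1000 * 1000);
 *    if (res == VK_TIMEOUT) {
 *       // Not an error: the fences simply have not signaled yet.
 *    }
 */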

void anv_GetPhysicalDeviceExternalFenceProperties(
    VkPhysicalDevice                            physicalDevice,
    const VkPhysicalDeviceExternalFenceInfoKHR* pExternalFenceInfo,
    VkExternalFencePropertiesKHR*               pExternalFenceProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   switch (pExternalFenceInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
      if (device->has_syncobj_wait) {
         pExternalFenceProperties->exportFromImportedHandleTypes =
            VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT |
            VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalFenceProperties->compatibleHandleTypes =
            VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT |
            VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalFenceProperties->externalFenceFeatures =
            VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT;
         return;
      }
      break;

   default:
      break;
   }

   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

VkResult anv_ImportFenceFdKHR(
    VkDevice                                    _device,
    const VkImportFenceFdInfoKHR*               pImportFenceFdInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, pImportFenceFdInfo->fence);
   int fd = pImportFenceFdInfo->fd;

   assert(pImportFenceFdInfo->sType ==
          VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR);

   struct anv_fence_impl new_impl = {
      .type = ANV_FENCE_TYPE_NONE,
   };

   switch (pImportFenceFdInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
      new_impl.type = ANV_FENCE_TYPE_SYNCOBJ;

      new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
      if (!new_impl.syncobj)
         return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);

      break;

   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
      /* Sync files are a bit tricky.  Because we want to continue using the
       * syncobj implementation of WaitForFences, we don't use the sync file
       * directly but instead import it into a syncobj.
       */
      new_impl.type = ANV_FENCE_TYPE_SYNCOBJ;

      new_impl.syncobj = anv_gem_syncobj_create(device, 0);
      if (!new_impl.syncobj)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      if (anv_gem_syncobj_import_sync_file(device, new_impl.syncobj, fd)) {
         anv_gem_syncobj_destroy(device, new_impl.syncobj);
         return vk_errorf(device->instance, NULL,
                          VK_ERROR_INVALID_EXTERNAL_HANDLE,
                          "syncobj sync file import failed: %m");
      }
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Importing a fence payload from a file descriptor transfers
    *    ownership of the file descriptor from the application to the
    *    Vulkan implementation.  The application must not perform any
    *    operations on the file descriptor after a successful import."
    *
    * If the import fails, we leave the file descriptor open.
    */
   close(fd);

   if (pImportFenceFdInfo->flags & VK_FENCE_IMPORT_TEMPORARY_BIT) {
      anv_fence_impl_cleanup(device, &fence->temporary);
      fence->temporary = new_impl;
   } else {
      anv_fence_impl_cleanup(device, &fence->permanent);
      fence->permanent = new_impl;
   }

   return VK_SUCCESS;
}
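
/* The sync-file import above relies on DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE with
 * the IMPORT_SYNC_FILE flag, which replaces the syncobj's fence with the
 * one from the sync file.  A sketch with the raw ioctl (illustrative only):
 *
 *    struct drm_syncobj_handle args = {
 *       .handle = syncobj,
 *       .flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE,
 *       .fd = fd,
 *    };
 *    int ret = drmIoctl(dev_fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
 */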

VkResult anv_GetFenceFdKHR(
    VkDevice                                    _device,
    const VkFenceGetFdInfoKHR*                  pGetFdInfo,
    int*                                        pFd)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, pGetFdInfo->fence);

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR);

   struct anv_fence_impl *impl =
      fence->temporary.type != ANV_FENCE_TYPE_NONE ?
      &fence->temporary : &fence->permanent;

   assert(impl->type == ANV_FENCE_TYPE_SYNCOBJ);
   switch (pGetFdInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT: {
      int fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = fd;
      break;
   }

   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT: {
      int fd = anv_gem_syncobj_export_sync_file(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = fd;
      break;
   }

   default:
      unreachable("Invalid fence export handle type");
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Export operations have the same transference as the specified
    *    handle type’s import operations.  [...]  If the fence was using a
    *    temporarily imported payload, the fence’s prior permanent payload
    *    will be restored."
    */
   if (impl == &fence->temporary)
      anv_fence_impl_cleanup(device, impl);

   return VK_SUCCESS;
}
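
/* Export is the mirror image of import: anv_gem_syncobj_handle_to_fd() and
 * anv_gem_syncobj_export_sync_file() presumably both wrap
 * DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, with the sync-file case additionally
 * setting DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE.  This is a
 * sketch of the expected mapping, not copied from the driver's gem code.
 */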

// Queue semaphore functions

VkResult anv_CreateSemaphore(
    VkDevice                                    _device,
    const VkSemaphoreCreateInfo*                pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkSemaphore*                                pSemaphore)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_semaphore *semaphore;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO);

   semaphore = vk_alloc2(&device->alloc, pAllocator, sizeof(*semaphore), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (semaphore == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   const VkExportSemaphoreCreateInfoKHR *export =
      vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO);
   VkExternalSemaphoreHandleTypeFlagsKHR handleTypes =
      export ? export->handleTypes : 0;

   if (handleTypes == 0) {
      /* The DRM execbuffer ioctl always executes in-order so long as you
       * stay on the same ring.  Since we don't expose the blit engine as a
       * DMA queue, a dummy no-op semaphore is a perfectly valid
       * implementation.
       */
      semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DUMMY;
   } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) {
      assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT);
      if (device->instance->physicalDevice.has_syncobj) {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
         semaphore->permanent.syncobj = anv_gem_syncobj_create(device, 0);
         if (!semaphore->permanent.syncobj) {
            vk_free2(&device->alloc, pAllocator, semaphore);
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         }
      } else {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_BO;
         VkResult result = anv_bo_cache_alloc(device, &device->bo_cache,
                                              4096, 0,
                                              &semaphore->permanent.bo);
         if (result != VK_SUCCESS) {
            vk_free2(&device->alloc, pAllocator, semaphore);
            return result;
         }

         /* If we're going to use this as a fence, we need to *not* have the
          * EXEC_OBJECT_ASYNC bit set.
          */
         assert(!(semaphore->permanent.bo->flags & EXEC_OBJECT_ASYNC));
      }
   } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
      assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT);

      semaphore->permanent.type = ANV_SEMAPHORE_TYPE_SYNC_FILE;
      semaphore->permanent.fd = -1;
   } else {
      assert(!"Unknown handle type");
      vk_free2(&device->alloc, pAllocator, semaphore);
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   semaphore->temporary.type = ANV_SEMAPHORE_TYPE_NONE;

   *pSemaphore = anv_semaphore_to_handle(semaphore);

   return VK_SUCCESS;
}
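
/* Client-side view (illustrative only): creating a semaphore that can later
 * be exported as an opaque FD, which selects the syncobj (or BO) path
 * above:
 *
 *    VkExportSemaphoreCreateInfoKHR export_info = {
 *       .sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR,
 *       .handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
 *    };
 *    VkSemaphoreCreateInfo info = {
 *       .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
 *       .pNext = &export_info,
 *    };
 *    VkSemaphore semaphore;
 *    vkCreateSemaphore(device, &info, NULL, &semaphore);
 */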

static void
anv_semaphore_impl_cleanup(struct anv_device *device,
                           struct anv_semaphore_impl *impl)
{
   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_NONE:
   case ANV_SEMAPHORE_TYPE_DUMMY:
      /* Dummy.  Nothing to do */
      break;

   case ANV_SEMAPHORE_TYPE_BO:
      anv_bo_cache_release(device, &device->bo_cache, impl->bo);
      break;

   case ANV_SEMAPHORE_TYPE_SYNC_FILE:
      close(impl->fd);
      break;

   case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
      anv_gem_syncobj_destroy(device, impl->syncobj);
      break;

   default:
      unreachable("Invalid semaphore type");
   }

   impl->type = ANV_SEMAPHORE_TYPE_NONE;
}

void
anv_semaphore_reset_temporary(struct anv_device *device,
                              struct anv_semaphore *semaphore)
{
   if (semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE)
      return;

   anv_semaphore_impl_cleanup(device, &semaphore->temporary);
}

void anv_DestroySemaphore(
    VkDevice                                    _device,
    VkSemaphore                                 _semaphore,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, _semaphore);

   if (semaphore == NULL)
      return;

   anv_semaphore_impl_cleanup(device, &semaphore->temporary);
   anv_semaphore_impl_cleanup(device, &semaphore->permanent);

   vk_free2(&device->alloc, pAllocator, semaphore);
}

void anv_GetPhysicalDeviceExternalSemaphoreProperties(
    VkPhysicalDevice                            physicalDevice,
    const VkPhysicalDeviceExternalSemaphoreInfoKHR* pExternalSemaphoreInfo,
    VkExternalSemaphorePropertiesKHR*           pExternalSemaphoreProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   switch (pExternalSemaphoreInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
      pExternalSemaphoreProperties->exportFromImportedHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
      pExternalSemaphoreProperties->compatibleHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
      pExternalSemaphoreProperties->externalSemaphoreFeatures =
         VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
         VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
      return;

   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
      if (device->has_exec_fence) {
         pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
         pExternalSemaphoreProperties->compatibleHandleTypes =
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalSemaphoreProperties->externalSemaphoreFeatures =
            VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
         return;
      }
      break;

   default:
      break;
   }

   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

VkResult anv_ImportSemaphoreFdKHR(
    VkDevice                                    _device,
    const VkImportSemaphoreFdInfoKHR*           pImportSemaphoreFdInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pImportSemaphoreFdInfo->semaphore);
   int fd = pImportSemaphoreFdInfo->fd;

   struct anv_semaphore_impl new_impl = {
      .type = ANV_SEMAPHORE_TYPE_NONE,
   };

   switch (pImportSemaphoreFdInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
      if (device->instance->physicalDevice.has_syncobj) {
         new_impl.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;

         new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
         if (!new_impl.syncobj)
            return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
      } else {
         new_impl.type = ANV_SEMAPHORE_TYPE_BO;

         VkResult result = anv_bo_cache_import(device, &device->bo_cache,
                                               fd, 0, &new_impl.bo);
         if (result != VK_SUCCESS)
            return result;

         if (new_impl.bo->size < 4096) {
            anv_bo_cache_release(device, &device->bo_cache, new_impl.bo);
            return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
         }

         /* If we're going to use this as a fence, we need to *not* have the
          * EXEC_OBJECT_ASYNC bit set.
          */
         assert(!(new_impl.bo->flags & EXEC_OBJECT_ASYNC));
      }

      /* From the Vulkan spec:
       *
       *    "Importing semaphore state from a file descriptor transfers
       *    ownership of the file descriptor from the application to the
       *    Vulkan implementation.  The application must not perform any
       *    operations on the file descriptor after a successful import."
       *
       * If the import fails, we leave the file descriptor open.
       */
      close(fd);
      break;

   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
      new_impl = (struct anv_semaphore_impl) {
         .type = ANV_SEMAPHORE_TYPE_SYNC_FILE,
         .fd = fd,
      };
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
      anv_semaphore_impl_cleanup(device, &semaphore->temporary);
      semaphore->temporary = new_impl;
   } else {
      anv_semaphore_impl_cleanup(device, &semaphore->permanent);
      semaphore->permanent = new_impl;
   }

   return VK_SUCCESS;
}

VkResult anv_GetSemaphoreFdKHR(
    VkDevice                                    _device,
    const VkSemaphoreGetFdInfoKHR*              pGetFdInfo,
    int*                                        pFd)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pGetFdInfo->semaphore);
   VkResult result;
   int fd;

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR);

   struct anv_semaphore_impl *impl =
      semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
      &semaphore->temporary : &semaphore->permanent;

   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_BO:
      result = anv_bo_cache_export(device, &device->bo_cache, impl->bo, pFd);
      if (result != VK_SUCCESS)
         return result;
      break;

   case ANV_SEMAPHORE_TYPE_SYNC_FILE:
      /* There are two reasons why this could happen:
       *
       * 1) The user is trying to export without submitting something that
       *    signals the semaphore.  If this is the case, it's their bug so
       *    what we return here doesn't matter.
       *
       * 2) The kernel didn't give us a file descriptor.  The most likely
       *    reason for this is running out of file descriptors.
       */
      if (impl->fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = impl->fd;

      /* From the Vulkan 1.0.53 spec:
       *
       *    "...exporting a semaphore payload to a handle with copy
       *    transference has the same side effects on the source
       *    semaphore’s payload as executing a semaphore wait operation."
       *
       * In other words, it may still be a SYNC_FD semaphore, but it's now
       * considered to have been waited on and no longer has a sync file
       * attached.
       */
      impl->fd = -1;
      return VK_SUCCESS;

   case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
      fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
      *pFd = fd;
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Export operations have the same transference as the specified
    *    handle type’s import operations.  [...]  If the semaphore was using
    *    a temporarily imported payload, the semaphore’s prior permanent
    *    payload will be restored."
    */
   if (impl == &semaphore->temporary)
      anv_semaphore_impl_cleanup(device, impl);

   return VK_SUCCESS;
}