[mesa.git] / src / intel / vulkan / anv_queue.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * This file implements VkQueue, VkFence, and VkSemaphore
 */

#include <fcntl.h>
#include <unistd.h>

#include "anv_private.h"
#include "vk_util.h"

#include "genxml/gen7_pack.h"

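/* Submit an execbuf to the kernel and, on success, write the offsets the
 * kernel actually placed each BO at back into our anv_bo structs so that
 * later submissions use up-to-date presumed offsets.  Pinned (softpin) BOs
 * must not have been moved by the kernel, which we assert.
 */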
VkResult
anv_device_execbuf(struct anv_device *device,
                   struct drm_i915_gem_execbuffer2 *execbuf,
                   struct anv_bo **execbuf_bos)
{
   int ret = device->no_hw ? 0 : anv_gem_execbuffer(device, execbuf);
   if (ret != 0) {
      /* We don't know the real error. */
      return anv_device_set_lost(device, "execbuf2 failed: %m");
   }

   struct drm_i915_gem_exec_object2 *objects =
      (void *)(uintptr_t)execbuf->buffers_ptr;
   for (uint32_t k = 0; k < execbuf->buffer_count; k++) {
      if (execbuf_bos[k]->flags & EXEC_OBJECT_PINNED)
         assert(execbuf_bos[k]->offset == objects[k].offset);
      execbuf_bos[k]->offset = objects[k].offset;
   }

   return VK_SUCCESS;
}

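/* Copy the given batch into a freshly allocated BO from the batch BO pool,
 * submit it on the render ring, and synchronously wait for it to complete.
 * Intended for small one-shot batches, e.g. at device initialization.
 */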
VkResult
anv_device_submit_simple_batch(struct anv_device *device,
                               struct anv_batch *batch)
{
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   struct anv_bo bo, *exec_bos[1];
   VkResult result = VK_SUCCESS;
   uint32_t size;

   /* The kernel requires the batch length to be 8-byte aligned. */
   size = align_u32(batch->next - batch->start, 8);
   result = anv_bo_pool_alloc(&device->batch_bo_pool, &bo, size);
   if (result != VK_SUCCESS)
      return result;

   memcpy(bo.map, batch->start, size);
   if (!device->info.has_llc)
      gen_flush_range(bo.map, size);

   exec_bos[0] = &bo;
   exec2_objects[0].handle = bo.gem_handle;
   exec2_objects[0].relocation_count = 0;
   exec2_objects[0].relocs_ptr = 0;
   exec2_objects[0].alignment = 0;
   exec2_objects[0].offset = bo.offset;
   exec2_objects[0].flags = bo.flags;
   exec2_objects[0].rsvd1 = 0;
   exec2_objects[0].rsvd2 = 0;

   execbuf.buffers_ptr = (uintptr_t) exec2_objects;
   execbuf.buffer_count = 1;
   execbuf.batch_start_offset = 0;
   execbuf.batch_len = size;
   execbuf.cliprects_ptr = 0;
   execbuf.num_cliprects = 0;
   execbuf.DR1 = 0;
   execbuf.DR4 = 0;

   execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   execbuf.rsvd1 = device->context_id;
   execbuf.rsvd2 = 0;

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
      gen_print_batch(&device->decoder_ctx, bo.map, bo.size, bo.offset, false);

   result = anv_device_execbuf(device, &execbuf, exec_bos);
   if (result != VK_SUCCESS)
      goto fail;

   result = anv_device_wait(device, &bo, INT64_MAX);

fail:
   anv_bo_pool_free(&device->batch_bo_pool, &bo);

   return result;
}

VkResult anv_QueueSubmit(
    VkQueue                                     _queue,
    uint32_t                                    submitCount,
    const VkSubmitInfo*                         pSubmits,
    VkFence                                     fence)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);
   struct anv_device *device = queue->device;

   /* Query for device status prior to submitting.  Technically, we don't
    * need to do this.  However, if we have a client that's submitting piles
    * of garbage, we would rather break as early as possible to keep the GPU
    * hanging contained.  If we don't check here, we'll either be waiting for
    * the kernel to kick us or we'll have to wait until the client waits on a
    * fence before we actually know whether or not we've hung.
    */
   VkResult result = anv_device_query_status(device);
   if (result != VK_SUCCESS)
      return result;

   /* We lock around QueueSubmit for three main reasons:
    *
    * 1) When a block pool is resized, we create a new gem handle with a
    *    different size and, in the case of surface states, possibly a
    *    different center offset but we re-use the same anv_bo struct when
    *    we do so.  If this happens in the middle of setting up an execbuf,
    *    we could end up with our list of BOs out of sync with our list of
    *    gem handles.
    *
    * 2) The algorithm we use for building the list of unique buffers isn't
    *    thread-safe.  While the client is supposed to synchronize around
    *    QueueSubmit, this would be extremely difficult to debug if it ever
    *    came up in the wild due to a broken app.  It's better to play it
    *    safe and just lock around QueueSubmit.
    *
    * 3) The anv_cmd_buffer_execbuf function may perform relocations in
    *    userspace.  Due to the fact that the surface state buffer is shared
    *    between batches, we can't afford to have that happen from multiple
    *    threads at the same time.  Even though the user is supposed to
    *    ensure this doesn't happen, we play it safe as in (2) above.
    *
    * Since the other operations that take the device lock, such as block
    * pool resizes, happen only rarely, the lock is almost never contended,
    * so taking it isn't really an expensive operation in this case.
    */
   pthread_mutex_lock(&device->mutex);

   if (fence && submitCount == 0) {
      /* If we don't have any command buffers, we need to submit a dummy
       * batch to give GEM something to wait on.  We could, potentially,
       * come up with something more efficient but this shouldn't be a
       * common case.
       */
      result = anv_cmd_buffer_execbuf(device, NULL, NULL, 0, NULL, 0, fence);
      goto out;
   }

   for (uint32_t i = 0; i < submitCount; i++) {
      /* Fence for this submit.  VK_NULL_HANDLE for all but the last one. */
      VkFence submit_fence = (i == submitCount - 1) ? fence : VK_NULL_HANDLE;

      if (pSubmits[i].commandBufferCount == 0) {
         /* If we don't have any command buffers, we need to submit a dummy
          * batch to give GEM something to wait on.  We could, potentially,
          * come up with something more efficient but this shouldn't be a
          * common case.
          */
         result = anv_cmd_buffer_execbuf(device, NULL,
                                         pSubmits[i].pWaitSemaphores,
                                         pSubmits[i].waitSemaphoreCount,
                                         pSubmits[i].pSignalSemaphores,
                                         pSubmits[i].signalSemaphoreCount,
                                         submit_fence);
         if (result != VK_SUCCESS)
            goto out;

         continue;
      }

      for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
         ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer,
                         pSubmits[i].pCommandBuffers[j]);
         assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
         assert(!anv_batch_has_error(&cmd_buffer->batch));

         /* Fence for this execbuf.  VK_NULL_HANDLE for all but the last one. */
         VkFence execbuf_fence =
            (j == pSubmits[i].commandBufferCount - 1) ?
            submit_fence : VK_NULL_HANDLE;

         const VkSemaphore *in_semaphores = NULL, *out_semaphores = NULL;
         uint32_t num_in_semaphores = 0, num_out_semaphores = 0;
         if (j == 0) {
            /* Only the first batch gets the in semaphores */
            in_semaphores = pSubmits[i].pWaitSemaphores;
            num_in_semaphores = pSubmits[i].waitSemaphoreCount;
         }

         if (j == pSubmits[i].commandBufferCount - 1) {
            /* Only the last batch gets the out semaphores */
            out_semaphores = pSubmits[i].pSignalSemaphores;
            num_out_semaphores = pSubmits[i].signalSemaphoreCount;
         }

         result = anv_cmd_buffer_execbuf(device, cmd_buffer,
                                         in_semaphores, num_in_semaphores,
                                         out_semaphores, num_out_semaphores,
                                         execbuf_fence);
         if (result != VK_SUCCESS)
            goto out;
      }
   }

   pthread_cond_broadcast(&device->queue_submit);

out:
   if (result != VK_SUCCESS) {
      /* In the case that something has gone wrong we may end up with an
       * inconsistent state from which it may not be trivial to recover.
       * For example, we might have computed address relocations and
       * any future attempt to re-submit this job will need to know about
       * this and avoid computing relocation addresses again.
       *
       * To avoid this sort of issue, we assume that if something went
       * wrong during submission we must already be in a really bad
       * situation anyway (such as being out of memory) and return
       * VK_ERROR_DEVICE_LOST to ensure that clients do not attempt to
       * submit the same job again to this device.
       */
      result = anv_device_set_lost(device, "vkQueueSubmit() failed");
   }

   pthread_mutex_unlock(&device->mutex);

   return result;
}

VkResult anv_QueueWaitIdle(
    VkQueue                                     _queue)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);

   return anv_DeviceWaitIdle(anv_device_to_handle(queue->device));
}

VkResult anv_CreateFence(
    VkDevice                                    _device,
    const VkFenceCreateInfo*                    pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkFence*                                    pFence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_fence *fence;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);

   fence = vk_zalloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (fence == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (device->instance->physicalDevice.has_syncobj_wait) {
      fence->permanent.type = ANV_FENCE_TYPE_SYNCOBJ;

      uint32_t create_flags = 0;
      if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT)
         create_flags |= DRM_SYNCOBJ_CREATE_SIGNALED;

      fence->permanent.syncobj = anv_gem_syncobj_create(device, create_flags);
      if (!fence->permanent.syncobj) {
         /* Don't leak the fence allocation on failure. */
         vk_free2(&device->alloc, pAllocator, fence);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }
   } else {
      fence->permanent.type = ANV_FENCE_TYPE_BO;

      VkResult result = anv_bo_pool_alloc(&device->batch_bo_pool,
                                          &fence->permanent.bo.bo, 4096);
      if (result != VK_SUCCESS) {
         vk_free2(&device->alloc, pAllocator, fence);
         return result;
      }

      if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
         fence->permanent.bo.state = ANV_BO_FENCE_STATE_SIGNALED;
      } else {
         fence->permanent.bo.state = ANV_BO_FENCE_STATE_RESET;
      }
   }

   *pFence = anv_fence_to_handle(fence);

   return VK_SUCCESS;
}

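/* Release whatever payload (BO, syncobj, or WSI fence) a fence
 * implementation currently holds and mark it as holding none.
 */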
static void
anv_fence_impl_cleanup(struct anv_device *device,
                       struct anv_fence_impl *impl)
{
   switch (impl->type) {
   case ANV_FENCE_TYPE_NONE:
      /* Dummy.  Nothing to do */
      break;

   case ANV_FENCE_TYPE_BO:
      anv_bo_pool_free(&device->batch_bo_pool, &impl->bo.bo);
      break;

   case ANV_FENCE_TYPE_SYNCOBJ:
      anv_gem_syncobj_destroy(device, impl->syncobj);
      break;

   case ANV_FENCE_TYPE_WSI:
      impl->fence_wsi->destroy(impl->fence_wsi);
      break;

   default:
      unreachable("Invalid fence type");
   }

   impl->type = ANV_FENCE_TYPE_NONE;
}

void anv_DestroyFence(
    VkDevice                                    _device,
    VkFence                                     _fence,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (!fence)
      return;

   anv_fence_impl_cleanup(device, &fence->temporary);
   anv_fence_impl_cleanup(device, &fence->permanent);

   vk_free2(&device->alloc, pAllocator, fence);
}

VkResult anv_ResetFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);

      /* From the Vulkan 1.0.53 spec:
       *
       *    "If any member of pFences currently has its payload imported with
       *    temporary permanence, that fence’s prior permanent payload is
       *    first restored.  The remaining operations described therefore
       *    operate on the restored payload."
       */
      if (fence->temporary.type != ANV_FENCE_TYPE_NONE)
         anv_fence_impl_cleanup(device, &fence->temporary);

      struct anv_fence_impl *impl = &fence->permanent;

      switch (impl->type) {
      case ANV_FENCE_TYPE_BO:
         impl->bo.state = ANV_BO_FENCE_STATE_RESET;
         break;

      case ANV_FENCE_TYPE_SYNCOBJ:
         anv_gem_syncobj_reset(device, impl->syncobj);
         break;

      default:
         unreachable("Invalid fence type");
      }
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceStatus(
    VkDevice                                    _device,
    VkFence                                     _fence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   struct anv_fence_impl *impl =
      fence->temporary.type != ANV_FENCE_TYPE_NONE ?
      &fence->temporary : &fence->permanent;

   switch (impl->type) {
   case ANV_FENCE_TYPE_BO:
      /* BO fences don't support import/export */
      assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
      switch (impl->bo.state) {
      case ANV_BO_FENCE_STATE_RESET:
         /* If it hasn't even been sent off to the GPU yet, it's not ready */
         return VK_NOT_READY;

      case ANV_BO_FENCE_STATE_SIGNALED:
         /* It's been signaled, return success */
         return VK_SUCCESS;

      case ANV_BO_FENCE_STATE_SUBMITTED: {
         VkResult result = anv_device_bo_busy(device, &impl->bo.bo);
         if (result == VK_SUCCESS) {
            impl->bo.state = ANV_BO_FENCE_STATE_SIGNALED;
            return VK_SUCCESS;
         } else {
            return result;
         }
      }
      default:
         unreachable("Invalid fence status");
      }

   case ANV_FENCE_TYPE_SYNCOBJ: {
      int ret = anv_gem_syncobj_wait(device, &impl->syncobj, 1, 0, true);
      if (ret == -1) {
         if (errno == ETIME) {
            return VK_NOT_READY;
         } else {
            /* We don't know the real error. */
            return anv_device_set_lost(device, "drm_syncobj_wait failed: %m");
         }
      } else {
         return VK_SUCCESS;
      }
   }

   default:
      unreachable("Invalid fence type");
   }
}

#define NSEC_PER_SEC 1000000000
#define INT_TYPE_MAX(type) ((1ull << (sizeof(type) * 8 - 1)) - 1)

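/* Timeout helpers.  Vulkan waits take a relative timeout in nanoseconds
 * while some of the kernel interfaces below want an absolute CLOCK_MONOTONIC
 * time, so we convert back and forth, clamping so that the arithmetic can
 * never overflow a signed 64-bit value.
 */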
static uint64_t
gettime_ns(void)
{
   struct timespec current;
   clock_gettime(CLOCK_MONOTONIC, &current);
   return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
}

static uint64_t anv_get_absolute_timeout(uint64_t timeout)
{
   if (timeout == 0)
      return 0;
   uint64_t current_time = gettime_ns();
   uint64_t max_timeout = (uint64_t) INT64_MAX - current_time;

   timeout = MIN2(max_timeout, timeout);

   return (current_time + timeout);
}

static int64_t anv_get_relative_timeout(uint64_t abs_timeout)
{
   uint64_t now = gettime_ns();

   /* We don't want negative timeouts.
    *
    * DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is
    * supposed to block indefinitely for timeouts < 0.  Unfortunately,
    * this was broken for a couple of kernel releases.  Since there's
    * no way to know whether or not the kernel we're using is one of
    * the broken ones, the best we can do is to clamp the timeout to
    * INT64_MAX.  This limits the maximum timeout from 584 years to
    * 292 years - likely not a big deal.
    */
   if (abs_timeout < now)
      return 0;

   uint64_t rel_timeout = abs_timeout - now;
   if (rel_timeout > (uint64_t) INT64_MAX)
      rel_timeout = INT64_MAX;

   return rel_timeout;
}

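/* Wait on a set of syncobj-backed fences with a single syncobj wait ioctl.
 * A temporary payload, if present, takes precedence over the permanent one,
 * matching Vulkan's external fence semantics.
 */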
static VkResult
anv_wait_for_syncobj_fences(struct anv_device *device,
                            uint32_t fenceCount,
                            const VkFence *pFences,
                            bool waitAll,
                            uint64_t abs_timeout_ns)
{
   uint32_t *syncobjs = vk_zalloc(&device->alloc,
                                  sizeof(*syncobjs) * fenceCount, 8,
                                  VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!syncobjs)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      assert(fence->permanent.type == ANV_FENCE_TYPE_SYNCOBJ);

      struct anv_fence_impl *impl =
         fence->temporary.type != ANV_FENCE_TYPE_NONE ?
         &fence->temporary : &fence->permanent;

      assert(impl->type == ANV_FENCE_TYPE_SYNCOBJ);
      syncobjs[i] = impl->syncobj;
   }

   /* The gem_syncobj_wait ioctl may return early due to an inherent
    * limitation in the way it computes timeouts.  Loop until we've actually
    * passed the timeout.
    */
   int ret;
   do {
      ret = anv_gem_syncobj_wait(device, syncobjs, fenceCount,
                                 abs_timeout_ns, waitAll);
   } while (ret == -1 && errno == ETIME && gettime_ns() < abs_timeout_ns);

   vk_free(&device->alloc, syncobjs);

   if (ret == -1) {
      if (errno == ETIME) {
         return VK_TIMEOUT;
      } else {
         /* We don't know the real error. */
         return anv_device_set_lost(device, "drm_syncobj_wait failed: %m");
      }
   } else {
      return VK_SUCCESS;
   }
}

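/* Wait on a set of BO-backed fences.  Submitted fences are waited on via
 * their BO; fences that haven't been submitted at all yet are handled by
 * sleeping on the device's queue_submit condition variable until another
 * thread submits work.
 */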
static VkResult
anv_wait_for_bo_fences(struct anv_device *device,
                       uint32_t fenceCount,
                       const VkFence *pFences,
                       bool waitAll,
                       uint64_t abs_timeout_ns)
{
   VkResult result = VK_SUCCESS;
   uint32_t pending_fences = fenceCount;
   while (pending_fences) {
      pending_fences = 0;
      bool signaled_fences = false;
      for (uint32_t i = 0; i < fenceCount; i++) {
         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);

         /* This function assumes that all fences are BO fences and that
          * they have no temporary state.  Since BO fences will never be
          * exported, this should be a safe assumption.
          */
         assert(fence->permanent.type == ANV_FENCE_TYPE_BO);
         assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
         struct anv_fence_impl *impl = &fence->permanent;

         switch (impl->bo.state) {
         case ANV_BO_FENCE_STATE_RESET:
            /* This fence hasn't been submitted yet, we'll catch it the
             * next time around.  Yes, this may mean we dead-loop but,
             * short of lots of locking and a condition variable, there's
             * not much that we can do about that.
             */
            pending_fences++;
            continue;

         case ANV_BO_FENCE_STATE_SIGNALED:
            /* This fence is not pending.  If waitAll isn't set, we can
             * return early.  Otherwise, we have to keep going.
             */
            if (!waitAll) {
               result = VK_SUCCESS;
               goto done;
            }
            continue;

         case ANV_BO_FENCE_STATE_SUBMITTED:
            /* These are the fences we really care about.  Go ahead and
             * wait on each one until we hit a timeout.
             */
            result = anv_device_wait(device, &impl->bo.bo,
                                     anv_get_relative_timeout(abs_timeout_ns));
            switch (result) {
            case VK_SUCCESS:
               impl->bo.state = ANV_BO_FENCE_STATE_SIGNALED;
               signaled_fences = true;
               if (!waitAll)
                  goto done;
               break;

            case VK_TIMEOUT:
               goto done;

            default:
               return result;
            }
         }
      }

      if (pending_fences && !signaled_fences) {
         /* If we've hit this then someone decided to vkWaitForFences before
          * they've actually submitted any of them to a queue.  This is a
          * fairly pessimal case, so it's ok to lock here and use a standard
          * pthreads condition variable.
          */
         pthread_mutex_lock(&device->mutex);

         /* It's possible that some of the fences have changed state since
          * the last time we checked.  Now that we have the lock, check for
          * pending fences again and don't wait if it's changed.
          */
         uint32_t now_pending_fences = 0;
         for (uint32_t i = 0; i < fenceCount; i++) {
            ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
            if (fence->permanent.bo.state == ANV_BO_FENCE_STATE_RESET)
               now_pending_fences++;
         }
         assert(now_pending_fences <= pending_fences);

         if (now_pending_fences == pending_fences) {
            struct timespec abstime = {
               .tv_sec = abs_timeout_ns / NSEC_PER_SEC,
               .tv_nsec = abs_timeout_ns % NSEC_PER_SEC,
            };

            ASSERTED int ret;
            ret = pthread_cond_timedwait(&device->queue_submit,
                                         &device->mutex, &abstime);
            assert(ret != EINVAL);
            if (gettime_ns() >= abs_timeout_ns) {
               pthread_mutex_unlock(&device->mutex);
               result = VK_TIMEOUT;
               goto done;
            }
         }

         pthread_mutex_unlock(&device->mutex);
      }
   }

done:
   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   return result;
}

static VkResult
anv_wait_for_wsi_fence(struct anv_device *device,
                       const VkFence _fence,
                       uint64_t abs_timeout)
{
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   struct anv_fence_impl *impl = &fence->permanent;

   return impl->fence_wsi->wait(impl->fence_wsi, abs_timeout);
}

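/* Generic waiter for a heterogeneous mix of fence types.  With waitAll set
 * we can simply wait on each fence in turn; for a wait-any we fall back to
 * polling every fence with a zero timeout until one of them signals or the
 * absolute timeout expires.
 */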
static VkResult
anv_wait_for_fences(struct anv_device *device,
                    uint32_t fenceCount,
                    const VkFence *pFences,
                    bool waitAll,
                    uint64_t abs_timeout)
{
   VkResult result = VK_SUCCESS;

   if (fenceCount <= 1 || waitAll) {
      for (uint32_t i = 0; i < fenceCount; i++) {
         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
         switch (fence->permanent.type) {
         case ANV_FENCE_TYPE_BO:
            result = anv_wait_for_bo_fences(device, 1, &pFences[i],
                                            true, abs_timeout);
            break;
         case ANV_FENCE_TYPE_SYNCOBJ:
            result = anv_wait_for_syncobj_fences(device, 1, &pFences[i],
                                                 true, abs_timeout);
            break;
         case ANV_FENCE_TYPE_WSI:
            result = anv_wait_for_wsi_fence(device, pFences[i], abs_timeout);
            break;
         case ANV_FENCE_TYPE_NONE:
            result = VK_SUCCESS;
            break;
         }
         if (result != VK_SUCCESS)
            return result;
      }
   } else {
      do {
         for (uint32_t i = 0; i < fenceCount; i++) {
            if (anv_wait_for_fences(device, 1, &pFences[i], true, 0) == VK_SUCCESS)
               return VK_SUCCESS;
         }
      } while (gettime_ns() < abs_timeout);
      result = VK_TIMEOUT;
   }
   return result;
}

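/* Helpers to detect the common homogeneous cases so that vkWaitForFences
 * can take one of the specialized wait paths instead of the generic one.
 */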
static bool anv_all_fences_syncobj(uint32_t fenceCount, const VkFence *pFences)
{
   for (uint32_t i = 0; i < fenceCount; ++i) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      if (fence->permanent.type != ANV_FENCE_TYPE_SYNCOBJ)
         return false;
   }
   return true;
}

static bool anv_all_fences_bo(uint32_t fenceCount, const VkFence *pFences)
{
   for (uint32_t i = 0; i < fenceCount; ++i) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      if (fence->permanent.type != ANV_FENCE_TYPE_BO)
         return false;
   }
   return true;
}

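/* vkWaitForFences dispatches to the cheapest applicable path: one syncobj
 * wait ioctl when every fence is a syncobj, the BO wait loop when every
 * fence is a BO, and the generic mixed-type fallback otherwise.
 */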
VkResult anv_WaitForFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences,
    VkBool32                                    waitAll,
    uint64_t                                    timeout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   uint64_t abs_timeout = anv_get_absolute_timeout(timeout);
   if (anv_all_fences_syncobj(fenceCount, pFences)) {
      return anv_wait_for_syncobj_fences(device, fenceCount, pFences,
                                         waitAll, abs_timeout);
   } else if (anv_all_fences_bo(fenceCount, pFences)) {
      return anv_wait_for_bo_fences(device, fenceCount, pFences,
                                    waitAll, abs_timeout);
   } else {
      return anv_wait_for_fences(device, fenceCount, pFences,
                                 waitAll, abs_timeout);
   }
}

void anv_GetPhysicalDeviceExternalFenceProperties(
    VkPhysicalDevice                            physicalDevice,
    const VkPhysicalDeviceExternalFenceInfo*    pExternalFenceInfo,
    VkExternalFenceProperties*                  pExternalFenceProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   switch (pExternalFenceInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
      if (device->has_syncobj_wait) {
         pExternalFenceProperties->exportFromImportedHandleTypes =
            VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT |
            VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalFenceProperties->compatibleHandleTypes =
            VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT |
            VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalFenceProperties->externalFenceFeatures =
            VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT;
         return;
      }
      break;

   default:
      break;
   }

   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

VkResult anv_ImportFenceFdKHR(
    VkDevice                                    _device,
    const VkImportFenceFdInfoKHR*               pImportFenceFdInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, pImportFenceFdInfo->fence);
   int fd = pImportFenceFdInfo->fd;

   assert(pImportFenceFdInfo->sType ==
          VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR);

   struct anv_fence_impl new_impl = {
      .type = ANV_FENCE_TYPE_NONE,
   };

   switch (pImportFenceFdInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
      new_impl.type = ANV_FENCE_TYPE_SYNCOBJ;

      new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
      if (!new_impl.syncobj)
         return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);

      break;

   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
      /* Sync files are a bit tricky.  Because we want to continue using the
       * syncobj implementation of WaitForFences, we don't use the sync file
       * directly but instead import it into a syncobj.
       */
      new_impl.type = ANV_FENCE_TYPE_SYNCOBJ;

      new_impl.syncobj = anv_gem_syncobj_create(device, 0);
      if (!new_impl.syncobj)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      if (anv_gem_syncobj_import_sync_file(device, new_impl.syncobj, fd)) {
         anv_gem_syncobj_destroy(device, new_impl.syncobj);
         return vk_errorf(device->instance, NULL,
                          VK_ERROR_INVALID_EXTERNAL_HANDLE,
                          "syncobj sync file import failed: %m");
      }
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Importing a fence payload from a file descriptor transfers
    *    ownership of the file descriptor from the application to the
    *    Vulkan implementation.  The application must not perform any
    *    operations on the file descriptor after a successful import."
    *
    * If the import fails, we leave the file descriptor open.
    */
   close(fd);

   if (pImportFenceFdInfo->flags & VK_FENCE_IMPORT_TEMPORARY_BIT) {
      anv_fence_impl_cleanup(device, &fence->temporary);
      fence->temporary = new_impl;
   } else {
      anv_fence_impl_cleanup(device, &fence->permanent);
      fence->permanent = new_impl;
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceFdKHR(
    VkDevice                                    _device,
    const VkFenceGetFdInfoKHR*                  pGetFdInfo,
    int*                                        pFd)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, pGetFdInfo->fence);

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR);

   struct anv_fence_impl *impl =
      fence->temporary.type != ANV_FENCE_TYPE_NONE ?
      &fence->temporary : &fence->permanent;

   assert(impl->type == ANV_FENCE_TYPE_SYNCOBJ);
   switch (pGetFdInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT: {
      int fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = fd;
      break;
   }

   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT: {
      int fd = anv_gem_syncobj_export_sync_file(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = fd;
      break;
   }

   default:
      unreachable("Invalid fence export handle type");
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Export operations have the same transference as the specified
    *    handle type’s import operations.  [...]  If the fence was using a
    *    temporarily imported payload, the fence’s prior permanent payload
    *    will be restored."
    */
   if (impl == &fence->temporary)
      anv_fence_impl_cleanup(device, impl);

   return VK_SUCCESS;
}

// Queue semaphore functions

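/* Like fences, semaphores have a permanent and a temporary payload.  The
 * payload may be a dummy (in-order execution on a single ring makes a no-op
 * semaphore valid), a BO used as a fence, a sync file fd, or a DRM syncobj,
 * depending on kernel support and the external handle types requested at
 * creation time.
 */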
VkResult anv_CreateSemaphore(
    VkDevice                                    _device,
    const VkSemaphoreCreateInfo*                pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkSemaphore*                                pSemaphore)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_semaphore *semaphore;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO);

   semaphore = vk_alloc2(&device->alloc, pAllocator, sizeof(*semaphore), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (semaphore == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   const VkExportSemaphoreCreateInfo *export =
      vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO);
   VkExternalSemaphoreHandleTypeFlags handleTypes =
      export ? export->handleTypes : 0;

   if (handleTypes == 0) {
      /* The DRM execbuffer ioctl always executes in-order so long as you
       * stay on the same ring.  Since we don't expose the blit engine as a
       * DMA queue, a dummy no-op semaphore is a perfectly valid
       * implementation.
       */
      semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DUMMY;
   } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) {
      assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT);
      if (device->instance->physicalDevice.has_syncobj) {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
         semaphore->permanent.syncobj = anv_gem_syncobj_create(device, 0);
         if (!semaphore->permanent.syncobj) {
            vk_free2(&device->alloc, pAllocator, semaphore);
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         }
      } else {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_BO;
         VkResult result = anv_bo_cache_alloc(device, &device->bo_cache,
                                              4096, ANV_BO_EXTERNAL,
                                              &semaphore->permanent.bo);
         if (result != VK_SUCCESS) {
            vk_free2(&device->alloc, pAllocator, semaphore);
            return result;
         }

         /* If we're going to use this as a fence, we need to *not* have the
          * EXEC_OBJECT_ASYNC bit set.
          */
         assert(!(semaphore->permanent.bo->flags & EXEC_OBJECT_ASYNC));
      }
   } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
      assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT);
      if (device->instance->physicalDevice.has_syncobj) {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
         semaphore->permanent.syncobj = anv_gem_syncobj_create(device, 0);
         if (!semaphore->permanent.syncobj) {
            /* Mirror the OPAQUE_FD branch: don't leak the semaphore if
             * syncobj creation fails.
             */
            vk_free2(&device->alloc, pAllocator, semaphore);
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         }
      } else {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_SYNC_FILE;
         semaphore->permanent.fd = -1;
      }
   } else {
      assert(!"Unknown handle type");
      vk_free2(&device->alloc, pAllocator, semaphore);
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   semaphore->temporary.type = ANV_SEMAPHORE_TYPE_NONE;

   *pSemaphore = anv_semaphore_to_handle(semaphore);

   return VK_SUCCESS;
}

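/* Destroy whatever payload a semaphore implementation holds (BO, sync file
 * fd, or syncobj) and reset it to ANV_SEMAPHORE_TYPE_NONE.
 */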
static void
anv_semaphore_impl_cleanup(struct anv_device *device,
                           struct anv_semaphore_impl *impl)
{
   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_NONE:
   case ANV_SEMAPHORE_TYPE_DUMMY:
      /* Dummy.  Nothing to do */
      break;

   case ANV_SEMAPHORE_TYPE_BO:
      anv_bo_cache_release(device, &device->bo_cache, impl->bo);
      break;

   case ANV_SEMAPHORE_TYPE_SYNC_FILE:
      close(impl->fd);
      break;

   case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
      anv_gem_syncobj_destroy(device, impl->syncobj);
      break;

   default:
      unreachable("Invalid semaphore type");
   }

   impl->type = ANV_SEMAPHORE_TYPE_NONE;
}

void
anv_semaphore_reset_temporary(struct anv_device *device,
                              struct anv_semaphore *semaphore)
{
   if (semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE)
      return;

   anv_semaphore_impl_cleanup(device, &semaphore->temporary);
}

void anv_DestroySemaphore(
    VkDevice                                    _device,
    VkSemaphore                                 _semaphore,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, _semaphore);

   if (semaphore == NULL)
      return;

   anv_semaphore_impl_cleanup(device, &semaphore->temporary);
   anv_semaphore_impl_cleanup(device, &semaphore->permanent);

   vk_free2(&device->alloc, pAllocator, semaphore);
}

void anv_GetPhysicalDeviceExternalSemaphoreProperties(
    VkPhysicalDevice                            physicalDevice,
    const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
    VkExternalSemaphoreProperties*              pExternalSemaphoreProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   switch (pExternalSemaphoreInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
      pExternalSemaphoreProperties->exportFromImportedHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
      pExternalSemaphoreProperties->compatibleHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
      pExternalSemaphoreProperties->externalSemaphoreFeatures =
         VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
         VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
      return;

   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
      if (device->has_exec_fence) {
         pExternalSemaphoreProperties->exportFromImportedHandleTypes =
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalSemaphoreProperties->compatibleHandleTypes =
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalSemaphoreProperties->externalSemaphoreFeatures =
            VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
         return;
      }
      break;

   default:
      break;
   }

   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

VkResult anv_ImportSemaphoreFdKHR(
    VkDevice                                    _device,
    const VkImportSemaphoreFdInfoKHR*           pImportSemaphoreFdInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pImportSemaphoreFdInfo->semaphore);
   int fd = pImportSemaphoreFdInfo->fd;

   struct anv_semaphore_impl new_impl = {
      .type = ANV_SEMAPHORE_TYPE_NONE,
   };

   switch (pImportSemaphoreFdInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
      if (device->instance->physicalDevice.has_syncobj) {
         new_impl.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;

         new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
         if (!new_impl.syncobj)
            return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
      } else {
         new_impl.type = ANV_SEMAPHORE_TYPE_BO;

         VkResult result = anv_bo_cache_import(device, &device->bo_cache,
                                               fd, ANV_BO_EXTERNAL,
                                               &new_impl.bo);
         if (result != VK_SUCCESS)
            return result;

         if (new_impl.bo->size < 4096) {
            anv_bo_cache_release(device, &device->bo_cache, new_impl.bo);
            return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
         }

         /* If we're going to use this as a fence, we need to *not* have the
          * EXEC_OBJECT_ASYNC bit set.
          */
         assert(!(new_impl.bo->flags & EXEC_OBJECT_ASYNC));
      }

      /* From the Vulkan spec:
       *
       *    "Importing semaphore state from a file descriptor transfers
       *    ownership of the file descriptor from the application to the
       *    Vulkan implementation.  The application must not perform any
       *    operations on the file descriptor after a successful import."
       *
       * If the import fails, we leave the file descriptor open.
       */
      close(fd);
      break;

   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
      if (device->instance->physicalDevice.has_syncobj) {
         new_impl = (struct anv_semaphore_impl) {
            .type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ,
            .syncobj = anv_gem_syncobj_create(device, 0),
         };
         if (!new_impl.syncobj)
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         if (anv_gem_syncobj_import_sync_file(device, new_impl.syncobj, fd)) {
            anv_gem_syncobj_destroy(device, new_impl.syncobj);
            return vk_errorf(device->instance, NULL,
                             VK_ERROR_INVALID_EXTERNAL_HANDLE,
                             "syncobj sync file import failed: %m");
         }
         /* Ownership of the FD is transferred to Anv.  Since we don't need
          * it anymore because the associated fence has been put into a
          * syncobj, we must close the FD.
          */
         close(fd);
      } else {
         new_impl = (struct anv_semaphore_impl) {
            .type = ANV_SEMAPHORE_TYPE_SYNC_FILE,
            .fd = fd,
         };
      }
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
      anv_semaphore_impl_cleanup(device, &semaphore->temporary);
      semaphore->temporary = new_impl;
   } else {
      anv_semaphore_impl_cleanup(device, &semaphore->permanent);
      semaphore->permanent = new_impl;
   }

   return VK_SUCCESS;
}

VkResult anv_GetSemaphoreFdKHR(
    VkDevice                                    _device,
    const VkSemaphoreGetFdInfoKHR*              pGetFdInfo,
    int*                                        pFd)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pGetFdInfo->semaphore);
   VkResult result;
   int fd;

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR);

   struct anv_semaphore_impl *impl =
      semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
      &semaphore->temporary : &semaphore->permanent;

   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_BO:
      result = anv_bo_cache_export(device, &device->bo_cache, impl->bo, pFd);
      if (result != VK_SUCCESS)
         return result;
      break;

   case ANV_SEMAPHORE_TYPE_SYNC_FILE:
      /* There are two reasons why this could happen:
       *
       * 1) The user is trying to export without submitting something that
       *    signals the semaphore.  If this is the case, it's their bug so
       *    what we return here doesn't matter.
       *
       * 2) The kernel didn't give us a file descriptor.  The most likely
       *    reason for this is running out of file descriptors.
       */
      if (impl->fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = impl->fd;

      /* From the Vulkan 1.0.53 spec:
       *
       *    "...exporting a semaphore payload to a handle with copy
       *    transference has the same side effects on the source
       *    semaphore’s payload as executing a semaphore wait operation."
       *
       * In other words, it may still be a SYNC_FD semaphore, but it's now
       * considered to have been waited on and no longer has a sync file
       * attached.
       */
      impl->fd = -1;
      return VK_SUCCESS;

   case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
      if (pGetFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
         fd = anv_gem_syncobj_export_sync_file(device, impl->syncobj);
      } else {
         assert(pGetFdInfo->handleType ==
                VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT);
         fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
      }
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
      *pFd = fd;
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Export operations have the same transference as the specified
    *    handle type’s import operations.  [...]  If the semaphore was using
    *    a temporarily imported payload, the semaphore’s prior permanent
    *    payload will be restored."
    */
   if (impl == &semaphore->temporary)
      anv_semaphore_impl_cleanup(device, impl);

   return VK_SUCCESS;
}