2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * This file implements VkQueue, VkFence, and VkSemaphore
31 #include "anv_private.h"
34 #include "genxml/gen7_pack.h"
36 uint64_t anv_gettime_ns(void)
38 struct timespec current
;
39 clock_gettime(CLOCK_MONOTONIC
, ¤t
);
40 return (uint64_t)current
.tv_sec
* NSEC_PER_SEC
+ current
.tv_nsec
;
/* Convert a relative timeout (ns) into an absolute CLOCK_MONOTONIC
 * deadline.  The result is clamped so it never exceeds INT64_MAX,
 * which also prevents the addition below from overflowing.
 */
uint64_t anv_get_absolute_timeout(uint64_t timeout)
{
   uint64_t now = anv_gettime_ns();
   uint64_t max_timeout = (uint64_t)INT64_MAX - now;

   if (timeout > max_timeout)
      timeout = max_timeout;

   return now + timeout;
}
/* Convert an absolute CLOCK_MONOTONIC deadline back into a relative
 * timeout in nanoseconds, clamped to [0, INT64_MAX].
 *
 * Fix: the visible code computed the clamped value but never returned it
 * (and never returned 0 for already-expired deadlines), so this non-void
 * function fell off the end — undefined behavior.  Both returns restored.
 */
static int64_t anv_get_relative_timeout(uint64_t abs_timeout)
{
   uint64_t now = anv_gettime_ns();

   /* We don't want negative timeouts.
    *
    * DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is
    * supposed to block indefinitely timeouts < 0.  Unfortunately,
    * this was broken for a couple of kernel releases.  Since there's
    * no way to know whether or not the kernel we're using is one of
    * the broken ones, the best we can do is to clamp the timeout to
    * INT64_MAX.  This limits the maximum timeout from 584 years to
    * 292 years - likely not a big deal.
    */
   if (abs_timeout < now)
      return 0;

   uint64_t rel_timeout = abs_timeout - now;
   if (rel_timeout > (uint64_t) INT64_MAX)
      rel_timeout = INT64_MAX;

   return rel_timeout;
}
79 static struct anv_semaphore
*anv_semaphore_ref(struct anv_semaphore
*semaphore
);
80 static void anv_semaphore_unref(struct anv_device
*device
, struct anv_semaphore
*semaphore
);
81 static void anv_semaphore_impl_cleanup(struct anv_device
*device
,
82 struct anv_semaphore_impl
*impl
);
85 anv_queue_submit_free(struct anv_device
*device
,
86 struct anv_queue_submit
*submit
)
88 const VkAllocationCallbacks
*alloc
= submit
->alloc
;
90 for (uint32_t i
= 0; i
< submit
->temporary_semaphore_count
; i
++)
91 anv_semaphore_impl_cleanup(device
, &submit
->temporary_semaphores
[i
]);
92 for (uint32_t i
= 0; i
< submit
->sync_fd_semaphore_count
; i
++)
93 anv_semaphore_unref(device
, submit
->sync_fd_semaphores
[i
]);
94 /* Execbuf does not consume the in_fence. It's our job to close it. */
95 if (submit
->in_fence
!= -1)
96 close(submit
->in_fence
);
97 if (submit
->out_fence
!= -1)
98 close(submit
->out_fence
);
99 vk_free(alloc
, submit
->fences
);
100 vk_free(alloc
, submit
->temporary_semaphores
);
101 vk_free(alloc
, submit
->fence_bos
);
102 vk_free(alloc
, submit
);
106 _anv_queue_submit(struct anv_queue
*queue
, struct anv_queue_submit
**_submit
)
108 struct anv_queue_submit
*submit
= *_submit
;
109 VkResult result
= anv_queue_execbuf(queue
, submit
);
111 if (result
== VK_SUCCESS
) {
112 /* Update signaled semaphores backed by syncfd. */
113 for (uint32_t i
= 0; i
< submit
->sync_fd_semaphore_count
; i
++) {
114 struct anv_semaphore
*semaphore
= submit
->sync_fd_semaphores
[i
];
115 /* Out fences can't have temporary state because that would imply
116 * that we imported a sync file and are trying to signal it.
118 assert(semaphore
->temporary
.type
== ANV_SEMAPHORE_TYPE_NONE
);
119 struct anv_semaphore_impl
*impl
= &semaphore
->permanent
;
121 assert(impl
->type
== ANV_SEMAPHORE_TYPE_SYNC_FILE
);
122 impl
->fd
= dup(submit
->out_fence
);
130 anv_queue_init(struct anv_device
*device
, struct anv_queue
*queue
)
132 queue
->_loader_data
.loaderMagic
= ICD_LOADER_MAGIC
;
133 queue
->device
= device
;
140 anv_queue_finish(struct anv_queue
*queue
)
145 anv_queue_submit_add_fence_bo(struct anv_queue_submit
*submit
,
149 if (submit
->fence_bo_count
>= submit
->fence_bo_array_length
) {
150 uint32_t new_len
= MAX2(submit
->fence_bo_array_length
* 2, 64);
153 vk_realloc(submit
->alloc
,
154 submit
->fence_bos
, new_len
* sizeof(*submit
->fence_bos
),
155 8, submit
->alloc_scope
);
156 if (submit
->fence_bos
== NULL
)
157 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
159 submit
->fence_bo_array_length
= new_len
;
162 /* Take advantage that anv_bo are allocated at 8 byte alignement so we can
163 * use the lowest bit to store whether this is a BO we need to signal.
165 submit
->fence_bos
[submit
->fence_bo_count
++] = anv_pack_ptr(bo
, 1, signal
);
171 anv_queue_submit_add_syncobj(struct anv_queue_submit
* submit
,
172 struct anv_device
*device
,
173 uint32_t handle
, uint32_t flags
)
177 if (submit
->fence_count
>= submit
->fence_array_length
) {
178 uint32_t new_len
= MAX2(submit
->fence_array_length
* 2, 64);
181 vk_realloc(submit
->alloc
,
182 submit
->fences
, new_len
* sizeof(*submit
->fences
),
183 8, submit
->alloc_scope
);
184 if (submit
->fences
== NULL
)
185 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
187 submit
->fence_array_length
= new_len
;
190 submit
->fences
[submit
->fence_count
++] = (struct drm_i915_gem_exec_fence
) {
199 anv_queue_submit_add_sync_fd_fence(struct anv_queue_submit
*submit
,
200 struct anv_semaphore
*semaphore
)
202 if (submit
->sync_fd_semaphore_count
>= submit
->sync_fd_semaphore_array_length
) {
203 uint32_t new_len
= MAX2(submit
->sync_fd_semaphore_array_length
* 2, 64);
204 struct anv_semaphore
**new_semaphores
=
205 vk_realloc(submit
->alloc
, submit
->sync_fd_semaphores
,
206 new_len
* sizeof(*submit
->sync_fd_semaphores
), 8,
207 submit
->alloc_scope
);
208 if (new_semaphores
== NULL
)
209 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
211 submit
->sync_fd_semaphores
= new_semaphores
;
214 submit
->sync_fd_semaphores
[submit
->sync_fd_semaphore_count
++] =
215 anv_semaphore_ref(semaphore
);
216 submit
->need_out_fence
= true;
221 static struct anv_queue_submit
*
222 anv_queue_submit_alloc(struct anv_device
*device
)
224 const VkAllocationCallbacks
*alloc
= &device
->alloc
;
225 VkSystemAllocationScope alloc_scope
= VK_SYSTEM_ALLOCATION_SCOPE_COMMAND
;
227 struct anv_queue_submit
*submit
= vk_zalloc(alloc
, sizeof(*submit
), 8, alloc_scope
);
231 submit
->alloc
= alloc
;
232 submit
->alloc_scope
= alloc_scope
;
233 submit
->in_fence
= -1;
234 submit
->out_fence
= -1;
240 anv_queue_submit_simple_batch(struct anv_queue
*queue
,
241 struct anv_batch
*batch
)
243 struct anv_device
*device
= queue
->device
;
244 struct anv_queue_submit
*submit
= anv_queue_submit_alloc(device
);
246 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
248 bool has_syncobj_wait
= device
->instance
->physicalDevice
.has_syncobj_wait
;
251 struct anv_bo
*batch_bo
, *sync_bo
;
253 if (has_syncobj_wait
) {
254 syncobj
= anv_gem_syncobj_create(device
, 0);
256 result
= vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY
);
257 goto err_free_submit
;
260 result
= anv_queue_submit_add_syncobj(submit
, device
, syncobj
,
261 I915_EXEC_FENCE_SIGNAL
);
263 result
= anv_device_alloc_bo(device
, 4096,
264 ANV_BO_ALLOC_EXTERNAL
|
265 ANV_BO_ALLOC_IMPLICIT_SYNC
,
267 if (result
!= VK_SUCCESS
)
268 goto err_free_submit
;
270 result
= anv_queue_submit_add_fence_bo(submit
, sync_bo
, true /* signal */);
273 if (result
!= VK_SUCCESS
)
274 goto err_destroy_sync_primitive
;
277 uint32_t size
= align_u32(batch
->next
- batch
->start
, 8);
278 result
= anv_bo_pool_alloc(&device
->batch_bo_pool
, size
, &batch_bo
);
279 if (result
!= VK_SUCCESS
)
280 goto err_destroy_sync_primitive
;
282 memcpy(batch_bo
->map
, batch
->start
, size
);
283 if (!device
->info
.has_llc
)
284 gen_flush_range(batch_bo
->map
, size
);
286 submit
->simple_bo
= batch_bo
;
287 submit
->simple_bo_size
= size
;
290 result
= _anv_queue_submit(queue
, &submit
);
292 if (result
== VK_SUCCESS
) {
293 if (has_syncobj_wait
) {
294 if (anv_gem_syncobj_wait(device
, &syncobj
, 1,
295 anv_get_absolute_timeout(INT64_MAX
), true))
296 result
= anv_device_set_lost(device
, "anv_gem_syncobj_wait failed: %m");
297 anv_gem_syncobj_destroy(device
, syncobj
);
299 result
= anv_device_wait(device
, sync_bo
,
300 anv_get_relative_timeout(INT64_MAX
));
301 anv_device_release_bo(device
, sync_bo
);
306 anv_bo_pool_free(&device
->batch_bo_pool
, batch_bo
);
309 anv_queue_submit_free(device
, submit
);
313 err_destroy_sync_primitive
:
314 if (has_syncobj_wait
)
315 anv_gem_syncobj_destroy(device
, syncobj
);
317 anv_device_release_bo(device
, sync_bo
);
320 anv_queue_submit_free(device
, submit
);
325 /* Transfer ownership of temporary semaphores from the VkSemaphore object to
326 * the anv_queue_submit object. Those temporary semaphores are then freed in
327 * anv_queue_submit_free() once the driver is finished with them.
330 maybe_transfer_temporary_semaphore(struct anv_queue_submit
*submit
,
331 struct anv_semaphore
*semaphore
,
332 struct anv_semaphore_impl
**out_impl
)
334 struct anv_semaphore_impl
*impl
= &semaphore
->temporary
;
336 if (impl
->type
== ANV_SEMAPHORE_TYPE_NONE
) {
337 *out_impl
= &semaphore
->permanent
;
342 * There is a requirement to reset semaphore to their permanent state after
343 * submission. From the Vulkan 1.0.53 spec:
345 * "If the import is temporary, the implementation must restore the
346 * semaphore to its prior permanent state after submitting the next
347 * semaphore wait operation."
349 * In the case we defer the actual submission to a thread because of the
350 * wait-before-submit behavior required for timeline semaphores, we need to
351 * make copies of the temporary syncobj to ensure they stay alive until we
352 * do the actual execbuffer ioctl.
354 if (submit
->temporary_semaphore_count
>= submit
->temporary_semaphore_array_length
) {
355 uint32_t new_len
= MAX2(submit
->temporary_semaphore_array_length
* 2, 8);
356 /* Make sure that if the realloc fails, we still have the old semaphore
357 * array around to properly clean things up on failure.
359 struct anv_semaphore_impl
*new_array
=
360 vk_realloc(submit
->alloc
,
361 submit
->temporary_semaphores
,
362 new_len
* sizeof(*submit
->temporary_semaphores
),
363 8, submit
->alloc_scope
);
364 if (new_array
== NULL
)
365 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
367 submit
->temporary_semaphores
= new_array
;
368 submit
->temporary_semaphore_array_length
= new_len
;
371 /* Copy anv_semaphore_impl into anv_queue_submit. */
372 submit
->temporary_semaphores
[submit
->temporary_semaphore_count
++] = *impl
;
373 *out_impl
= &submit
->temporary_semaphores
[submit
->temporary_semaphore_count
- 1];
375 /* Clear the incoming semaphore */
376 impl
->type
= ANV_SEMAPHORE_TYPE_NONE
;
382 anv_queue_submit(struct anv_queue
*queue
,
383 struct anv_cmd_buffer
*cmd_buffer
,
384 const VkSemaphore
*in_semaphores
,
385 uint32_t num_in_semaphores
,
386 const VkSemaphore
*out_semaphores
,
387 uint32_t num_out_semaphores
,
390 ANV_FROM_HANDLE(anv_fence
, fence
, _fence
);
391 struct anv_device
*device
= queue
->device
;
392 UNUSED
struct anv_physical_device
*pdevice
= &device
->instance
->physicalDevice
;
393 struct anv_queue_submit
*submit
= anv_queue_submit_alloc(device
);
395 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
397 submit
->cmd_buffer
= cmd_buffer
;
399 VkResult result
= VK_SUCCESS
;
401 for (uint32_t i
= 0; i
< num_in_semaphores
; i
++) {
402 ANV_FROM_HANDLE(anv_semaphore
, semaphore
, in_semaphores
[i
]);
403 struct anv_semaphore_impl
*impl
;
405 result
= maybe_transfer_temporary_semaphore(submit
, semaphore
, &impl
);
406 if (result
!= VK_SUCCESS
)
409 switch (impl
->type
) {
410 case ANV_SEMAPHORE_TYPE_BO
:
411 assert(!pdevice
->has_syncobj
);
412 result
= anv_queue_submit_add_fence_bo(submit
, impl
->bo
, false /* signal */);
413 if (result
!= VK_SUCCESS
)
417 case ANV_SEMAPHORE_TYPE_SYNC_FILE
:
418 assert(!pdevice
->has_syncobj
);
419 if (submit
->in_fence
== -1) {
420 submit
->in_fence
= impl
->fd
;
421 if (submit
->in_fence
== -1) {
422 result
= vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE
);
427 int merge
= anv_gem_sync_file_merge(device
, submit
->in_fence
, impl
->fd
);
429 result
= vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE
);
433 close(submit
->in_fence
);
435 submit
->in_fence
= merge
;
439 case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ
: {
440 result
= anv_queue_submit_add_syncobj(submit
, device
,
442 I915_EXEC_FENCE_WAIT
);
443 if (result
!= VK_SUCCESS
)
453 for (uint32_t i
= 0; i
< num_out_semaphores
; i
++) {
454 ANV_FROM_HANDLE(anv_semaphore
, semaphore
, out_semaphores
[i
]);
456 /* Under most circumstances, out fences won't be temporary. However,
457 * the spec does allow it for opaque_fd. From the Vulkan 1.0.53 spec:
459 * "If the import is temporary, the implementation must restore the
460 * semaphore to its prior permanent state after submitting the next
461 * semaphore wait operation."
463 * The spec says nothing whatsoever about signal operations on
464 * temporarily imported semaphores so it appears they are allowed.
465 * There are also CTS tests that require this to work.
467 struct anv_semaphore_impl
*impl
=
468 semaphore
->temporary
.type
!= ANV_SEMAPHORE_TYPE_NONE
?
469 &semaphore
->temporary
: &semaphore
->permanent
;
471 switch (impl
->type
) {
472 case ANV_SEMAPHORE_TYPE_BO
:
473 assert(!pdevice
->has_syncobj
);
474 result
= anv_queue_submit_add_fence_bo(submit
, impl
->bo
, true /* signal */);
475 if (result
!= VK_SUCCESS
)
479 case ANV_SEMAPHORE_TYPE_SYNC_FILE
:
480 assert(!pdevice
->has_syncobj
);
481 result
= anv_queue_submit_add_sync_fd_fence(submit
, semaphore
);
482 if (result
!= VK_SUCCESS
)
486 case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ
: {
487 result
= anv_queue_submit_add_syncobj(submit
, device
, impl
->syncobj
,
488 I915_EXEC_FENCE_SIGNAL
);
489 if (result
!= VK_SUCCESS
)
500 /* Under most circumstances, out fences won't be temporary. However,
501 * the spec does allow it for opaque_fd. From the Vulkan 1.0.53 spec:
503 * "If the import is temporary, the implementation must restore the
504 * semaphore to its prior permanent state after submitting the next
505 * semaphore wait operation."
507 * The spec says nothing whatsoever about signal operations on
508 * temporarily imported semaphores so it appears they are allowed.
509 * There are also CTS tests that require this to work.
511 struct anv_fence_impl
*impl
=
512 fence
->temporary
.type
!= ANV_FENCE_TYPE_NONE
?
513 &fence
->temporary
: &fence
->permanent
;
515 switch (impl
->type
) {
516 case ANV_FENCE_TYPE_BO
:
517 result
= anv_queue_submit_add_fence_bo(submit
, impl
->bo
.bo
, true /* signal */);
518 if (result
!= VK_SUCCESS
)
522 case ANV_FENCE_TYPE_SYNCOBJ
: {
524 * For the same reason we reset the signaled binary syncobj above,
525 * also reset the fence's syncobj so that they don't contain a
526 * signaled dma-fence.
528 result
= anv_queue_submit_add_syncobj(submit
, device
, impl
->syncobj
,
529 I915_EXEC_FENCE_SIGNAL
);
530 if (result
!= VK_SUCCESS
)
536 unreachable("Invalid fence type");
540 result
= _anv_queue_submit(queue
, &submit
);
541 if (result
!= VK_SUCCESS
)
544 if (fence
&& fence
->permanent
.type
== ANV_FENCE_TYPE_BO
) {
545 /* BO fences can't be shared, so they can't be temporary. */
546 assert(fence
->temporary
.type
== ANV_FENCE_TYPE_NONE
);
548 /* Once the execbuf has returned, we need to set the fence state to
549 * SUBMITTED. We can't do this before calling execbuf because
550 * anv_GetFenceStatus does take the global device lock before checking
553 * We set the fence state to SUBMITTED regardless of whether or not the
554 * execbuf succeeds because we need to ensure that vkWaitForFences() and
555 * vkGetFenceStatus() return a valid result (VK_ERROR_DEVICE_LOST or
556 * VK_SUCCESS) in a finite amount of time even if execbuf fails.
558 fence
->permanent
.bo
.state
= ANV_BO_FENCE_STATE_SUBMITTED
;
563 anv_queue_submit_free(device
, submit
);
568 VkResult
anv_QueueSubmit(
570 uint32_t submitCount
,
571 const VkSubmitInfo
* pSubmits
,
574 ANV_FROM_HANDLE(anv_queue
, queue
, _queue
);
576 /* Query for device status prior to submitting. Technically, we don't need
577 * to do this. However, if we have a client that's submitting piles of
578 * garbage, we would rather break as early as possible to keep the GPU
579 * hanging contained. If we don't check here, we'll either be waiting for
580 * the kernel to kick us or we'll have to wait until the client waits on a
581 * fence before we actually know whether or not we've hung.
583 VkResult result
= anv_device_query_status(queue
->device
);
584 if (result
!= VK_SUCCESS
)
587 if (fence
&& submitCount
== 0) {
588 /* If we don't have any command buffers, we need to submit a dummy
589 * batch to give GEM something to wait on. We could, potentially,
590 * come up with something more efficient but this shouldn't be a
593 result
= anv_queue_submit(queue
, NULL
, NULL
, 0, NULL
, 0, fence
);
597 for (uint32_t i
= 0; i
< submitCount
; i
++) {
598 /* Fence for this submit. NULL for all but the last one */
599 VkFence submit_fence
= (i
== submitCount
- 1) ? fence
: VK_NULL_HANDLE
;
601 if (pSubmits
[i
].commandBufferCount
== 0) {
602 /* If we don't have any command buffers, we need to submit a dummy
603 * batch to give GEM something to wait on. We could, potentially,
604 * come up with something more efficient but this shouldn't be a
607 result
= anv_queue_submit(queue
, NULL
,
608 pSubmits
[i
].pWaitSemaphores
,
609 pSubmits
[i
].waitSemaphoreCount
,
610 pSubmits
[i
].pSignalSemaphores
,
611 pSubmits
[i
].signalSemaphoreCount
,
613 if (result
!= VK_SUCCESS
)
619 for (uint32_t j
= 0; j
< pSubmits
[i
].commandBufferCount
; j
++) {
620 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
,
621 pSubmits
[i
].pCommandBuffers
[j
]);
622 assert(cmd_buffer
->level
== VK_COMMAND_BUFFER_LEVEL_PRIMARY
);
623 assert(!anv_batch_has_error(&cmd_buffer
->batch
));
625 /* Fence for this execbuf. NULL for all but the last one */
626 VkFence execbuf_fence
=
627 (j
== pSubmits
[i
].commandBufferCount
- 1) ?
628 submit_fence
: VK_NULL_HANDLE
;
630 const VkSemaphore
*in_semaphores
= NULL
, *out_semaphores
= NULL
;
631 uint32_t num_in_semaphores
= 0, num_out_semaphores
= 0;
633 /* Only the first batch gets the in semaphores */
634 in_semaphores
= pSubmits
[i
].pWaitSemaphores
;
635 num_in_semaphores
= pSubmits
[i
].waitSemaphoreCount
;
638 if (j
== pSubmits
[i
].commandBufferCount
- 1) {
639 /* Only the last batch gets the out semaphores */
640 out_semaphores
= pSubmits
[i
].pSignalSemaphores
;
641 num_out_semaphores
= pSubmits
[i
].signalSemaphoreCount
;
644 result
= anv_queue_submit(queue
, cmd_buffer
,
645 in_semaphores
, num_in_semaphores
,
646 out_semaphores
, num_out_semaphores
,
648 if (result
!= VK_SUCCESS
)
654 if (result
!= VK_SUCCESS
&& result
!= VK_ERROR_DEVICE_LOST
) {
655 /* In the case that something has gone wrong we may end up with an
656 * inconsistent state from which it may not be trivial to recover.
657 * For example, we might have computed address relocations and
658 * any future attempt to re-submit this job will need to know about
659 * this and avoid computing relocation addresses again.
661 * To avoid this sort of issues, we assume that if something was
662 * wrong during submission we must already be in a really bad situation
663 * anyway (such us being out of memory) and return
664 * VK_ERROR_DEVICE_LOST to ensure that clients do not attempt to
665 * submit the same job again to this device.
667 * We skip doing this on VK_ERROR_DEVICE_LOST because
668 * anv_device_set_lost() would have been called already by a callee of
669 * anv_queue_submit().
671 result
= anv_device_set_lost(queue
->device
, "vkQueueSubmit() failed");
677 VkResult
anv_QueueWaitIdle(
680 ANV_FROM_HANDLE(anv_queue
, queue
, _queue
);
682 if (anv_device_is_lost(queue
->device
))
683 return VK_ERROR_DEVICE_LOST
;
685 return anv_queue_submit_simple_batch(queue
, NULL
);
688 VkResult
anv_CreateFence(
690 const VkFenceCreateInfo
* pCreateInfo
,
691 const VkAllocationCallbacks
* pAllocator
,
694 ANV_FROM_HANDLE(anv_device
, device
, _device
);
695 struct anv_fence
*fence
;
697 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_FENCE_CREATE_INFO
);
699 fence
= vk_zalloc2(&device
->alloc
, pAllocator
, sizeof(*fence
), 8,
700 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
702 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
704 if (device
->instance
->physicalDevice
.has_syncobj_wait
) {
705 fence
->permanent
.type
= ANV_FENCE_TYPE_SYNCOBJ
;
707 uint32_t create_flags
= 0;
708 if (pCreateInfo
->flags
& VK_FENCE_CREATE_SIGNALED_BIT
)
709 create_flags
|= DRM_SYNCOBJ_CREATE_SIGNALED
;
711 fence
->permanent
.syncobj
= anv_gem_syncobj_create(device
, create_flags
);
712 if (!fence
->permanent
.syncobj
)
713 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
715 fence
->permanent
.type
= ANV_FENCE_TYPE_BO
;
717 VkResult result
= anv_bo_pool_alloc(&device
->batch_bo_pool
, 4096,
718 &fence
->permanent
.bo
.bo
);
719 if (result
!= VK_SUCCESS
)
722 if (pCreateInfo
->flags
& VK_FENCE_CREATE_SIGNALED_BIT
) {
723 fence
->permanent
.bo
.state
= ANV_BO_FENCE_STATE_SIGNALED
;
725 fence
->permanent
.bo
.state
= ANV_BO_FENCE_STATE_RESET
;
729 *pFence
= anv_fence_to_handle(fence
);
735 anv_fence_impl_cleanup(struct anv_device
*device
,
736 struct anv_fence_impl
*impl
)
738 switch (impl
->type
) {
739 case ANV_FENCE_TYPE_NONE
:
740 /* Dummy. Nothing to do */
743 case ANV_FENCE_TYPE_BO
:
744 anv_bo_pool_free(&device
->batch_bo_pool
, impl
->bo
.bo
);
747 case ANV_FENCE_TYPE_SYNCOBJ
:
748 anv_gem_syncobj_destroy(device
, impl
->syncobj
);
751 case ANV_FENCE_TYPE_WSI
:
752 impl
->fence_wsi
->destroy(impl
->fence_wsi
);
756 unreachable("Invalid fence type");
759 impl
->type
= ANV_FENCE_TYPE_NONE
;
762 void anv_DestroyFence(
765 const VkAllocationCallbacks
* pAllocator
)
767 ANV_FROM_HANDLE(anv_device
, device
, _device
);
768 ANV_FROM_HANDLE(anv_fence
, fence
, _fence
);
773 anv_fence_impl_cleanup(device
, &fence
->temporary
);
774 anv_fence_impl_cleanup(device
, &fence
->permanent
);
776 vk_free2(&device
->alloc
, pAllocator
, fence
);
779 VkResult
anv_ResetFences(
782 const VkFence
* pFences
)
784 ANV_FROM_HANDLE(anv_device
, device
, _device
);
786 for (uint32_t i
= 0; i
< fenceCount
; i
++) {
787 ANV_FROM_HANDLE(anv_fence
, fence
, pFences
[i
]);
789 /* From the Vulkan 1.0.53 spec:
791 * "If any member of pFences currently has its payload imported with
792 * temporary permanence, that fence’s prior permanent payload is
793 * first restored. The remaining operations described therefore
794 * operate on the restored payload.
796 if (fence
->temporary
.type
!= ANV_FENCE_TYPE_NONE
)
797 anv_fence_impl_cleanup(device
, &fence
->temporary
);
799 struct anv_fence_impl
*impl
= &fence
->permanent
;
801 switch (impl
->type
) {
802 case ANV_FENCE_TYPE_BO
:
803 impl
->bo
.state
= ANV_BO_FENCE_STATE_RESET
;
806 case ANV_FENCE_TYPE_SYNCOBJ
:
807 anv_gem_syncobj_reset(device
, impl
->syncobj
);
811 unreachable("Invalid fence type");
818 VkResult
anv_GetFenceStatus(
822 ANV_FROM_HANDLE(anv_device
, device
, _device
);
823 ANV_FROM_HANDLE(anv_fence
, fence
, _fence
);
825 if (anv_device_is_lost(device
))
826 return VK_ERROR_DEVICE_LOST
;
828 struct anv_fence_impl
*impl
=
829 fence
->temporary
.type
!= ANV_FENCE_TYPE_NONE
?
830 &fence
->temporary
: &fence
->permanent
;
832 switch (impl
->type
) {
833 case ANV_FENCE_TYPE_BO
:
834 /* BO fences don't support import/export */
835 assert(fence
->temporary
.type
== ANV_FENCE_TYPE_NONE
);
836 switch (impl
->bo
.state
) {
837 case ANV_BO_FENCE_STATE_RESET
:
838 /* If it hasn't even been sent off to the GPU yet, it's not ready */
841 case ANV_BO_FENCE_STATE_SIGNALED
:
842 /* It's been signaled, return success */
845 case ANV_BO_FENCE_STATE_SUBMITTED
: {
846 VkResult result
= anv_device_bo_busy(device
, impl
->bo
.bo
);
847 if (result
== VK_SUCCESS
) {
848 impl
->bo
.state
= ANV_BO_FENCE_STATE_SIGNALED
;
855 unreachable("Invalid fence status");
858 case ANV_FENCE_TYPE_SYNCOBJ
: {
859 int ret
= anv_gem_syncobj_wait(device
, &impl
->syncobj
, 1, 0, true);
861 if (errno
== ETIME
) {
864 /* We don't know the real error. */
865 return anv_device_set_lost(device
, "drm_syncobj_wait failed: %m");
873 unreachable("Invalid fence type");
878 anv_wait_for_syncobj_fences(struct anv_device
*device
,
880 const VkFence
*pFences
,
882 uint64_t abs_timeout_ns
)
884 uint32_t *syncobjs
= vk_zalloc(&device
->alloc
,
885 sizeof(*syncobjs
) * fenceCount
, 8,
886 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND
);
888 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
890 for (uint32_t i
= 0; i
< fenceCount
; i
++) {
891 ANV_FROM_HANDLE(anv_fence
, fence
, pFences
[i
]);
892 assert(fence
->permanent
.type
== ANV_FENCE_TYPE_SYNCOBJ
);
894 struct anv_fence_impl
*impl
=
895 fence
->temporary
.type
!= ANV_FENCE_TYPE_NONE
?
896 &fence
->temporary
: &fence
->permanent
;
898 assert(impl
->type
== ANV_FENCE_TYPE_SYNCOBJ
);
899 syncobjs
[i
] = impl
->syncobj
;
902 /* The gem_syncobj_wait ioctl may return early due to an inherent
903 * limitation in the way it computes timeouts. Loop until we've actually
904 * passed the timeout.
908 ret
= anv_gem_syncobj_wait(device
, syncobjs
, fenceCount
,
909 abs_timeout_ns
, waitAll
);
910 } while (ret
== -1 && errno
== ETIME
&& anv_gettime_ns() < abs_timeout_ns
);
912 vk_free(&device
->alloc
, syncobjs
);
915 if (errno
== ETIME
) {
918 /* We don't know the real error. */
919 return anv_device_set_lost(device
, "drm_syncobj_wait failed: %m");
927 anv_wait_for_bo_fences(struct anv_device
*device
,
929 const VkFence
*pFences
,
931 uint64_t abs_timeout_ns
)
933 VkResult result
= VK_SUCCESS
;
934 uint32_t pending_fences
= fenceCount
;
935 while (pending_fences
) {
937 bool signaled_fences
= false;
938 for (uint32_t i
= 0; i
< fenceCount
; i
++) {
939 ANV_FROM_HANDLE(anv_fence
, fence
, pFences
[i
]);
941 /* This function assumes that all fences are BO fences and that they
942 * have no temporary state. Since BO fences will never be exported,
943 * this should be a safe assumption.
945 assert(fence
->permanent
.type
== ANV_FENCE_TYPE_BO
);
946 assert(fence
->temporary
.type
== ANV_FENCE_TYPE_NONE
);
947 struct anv_fence_impl
*impl
= &fence
->permanent
;
949 switch (impl
->bo
.state
) {
950 case ANV_BO_FENCE_STATE_RESET
:
951 /* This fence hasn't been submitted yet, we'll catch it the next
952 * time around. Yes, this may mean we dead-loop but, short of
953 * lots of locking and a condition variable, there's not much that
954 * we can do about that.
959 case ANV_BO_FENCE_STATE_SIGNALED
:
960 /* This fence is not pending. If waitAll isn't set, we can return
961 * early. Otherwise, we have to keep going.
969 case ANV_BO_FENCE_STATE_SUBMITTED
:
970 /* These are the fences we really care about. Go ahead and wait
971 * on it until we hit a timeout.
973 result
= anv_device_wait(device
, impl
->bo
.bo
,
974 anv_get_relative_timeout(abs_timeout_ns
));
977 impl
->bo
.state
= ANV_BO_FENCE_STATE_SIGNALED
;
978 signaled_fences
= true;
992 if (pending_fences
&& !signaled_fences
) {
993 /* If we've hit this then someone decided to vkWaitForFences before
994 * they've actually submitted any of them to a queue. This is a
995 * fairly pessimal case, so it's ok to lock here and use a standard
996 * pthreads condition variable.
998 pthread_mutex_lock(&device
->mutex
);
1000 /* It's possible that some of the fences have changed state since the
1001 * last time we checked. Now that we have the lock, check for
1002 * pending fences again and don't wait if it's changed.
1004 uint32_t now_pending_fences
= 0;
1005 for (uint32_t i
= 0; i
< fenceCount
; i
++) {
1006 ANV_FROM_HANDLE(anv_fence
, fence
, pFences
[i
]);
1007 if (fence
->permanent
.bo
.state
== ANV_BO_FENCE_STATE_RESET
)
1008 now_pending_fences
++;
1010 assert(now_pending_fences
<= pending_fences
);
1012 if (now_pending_fences
== pending_fences
) {
1013 struct timespec abstime
= {
1014 .tv_sec
= abs_timeout_ns
/ NSEC_PER_SEC
,
1015 .tv_nsec
= abs_timeout_ns
% NSEC_PER_SEC
,
1019 ret
= pthread_cond_timedwait(&device
->queue_submit
,
1020 &device
->mutex
, &abstime
);
1021 assert(ret
!= EINVAL
);
1022 if (anv_gettime_ns() >= abs_timeout_ns
) {
1023 pthread_mutex_unlock(&device
->mutex
);
1024 result
= VK_TIMEOUT
;
1029 pthread_mutex_unlock(&device
->mutex
);
1034 if (anv_device_is_lost(device
))
1035 return VK_ERROR_DEVICE_LOST
;
1041 anv_wait_for_wsi_fence(struct anv_device
*device
,
1042 const VkFence _fence
,
1043 uint64_t abs_timeout
)
1045 ANV_FROM_HANDLE(anv_fence
, fence
, _fence
);
1046 struct anv_fence_impl
*impl
= &fence
->permanent
;
1048 return impl
->fence_wsi
->wait(impl
->fence_wsi
, abs_timeout
);
1052 anv_wait_for_fences(struct anv_device
*device
,
1053 uint32_t fenceCount
,
1054 const VkFence
*pFences
,
1056 uint64_t abs_timeout
)
1058 VkResult result
= VK_SUCCESS
;
1060 if (fenceCount
<= 1 || waitAll
) {
1061 for (uint32_t i
= 0; i
< fenceCount
; i
++) {
1062 ANV_FROM_HANDLE(anv_fence
, fence
, pFences
[i
]);
1063 switch (fence
->permanent
.type
) {
1064 case ANV_FENCE_TYPE_BO
:
1065 result
= anv_wait_for_bo_fences(device
, 1, &pFences
[i
],
1068 case ANV_FENCE_TYPE_SYNCOBJ
:
1069 result
= anv_wait_for_syncobj_fences(device
, 1, &pFences
[i
],
1072 case ANV_FENCE_TYPE_WSI
:
1073 result
= anv_wait_for_wsi_fence(device
, pFences
[i
], abs_timeout
);
1075 case ANV_FENCE_TYPE_NONE
:
1076 result
= VK_SUCCESS
;
1079 if (result
!= VK_SUCCESS
)
1084 for (uint32_t i
= 0; i
< fenceCount
; i
++) {
1085 if (anv_wait_for_fences(device
, 1, &pFences
[i
], true, 0) == VK_SUCCESS
)
1088 } while (anv_gettime_ns() < abs_timeout
);
1089 result
= VK_TIMEOUT
;
1094 static bool anv_all_fences_syncobj(uint32_t fenceCount
, const VkFence
*pFences
)
1096 for (uint32_t i
= 0; i
< fenceCount
; ++i
) {
1097 ANV_FROM_HANDLE(anv_fence
, fence
, pFences
[i
]);
1098 if (fence
->permanent
.type
!= ANV_FENCE_TYPE_SYNCOBJ
)
1104 static bool anv_all_fences_bo(uint32_t fenceCount
, const VkFence
*pFences
)
1106 for (uint32_t i
= 0; i
< fenceCount
; ++i
) {
1107 ANV_FROM_HANDLE(anv_fence
, fence
, pFences
[i
]);
1108 if (fence
->permanent
.type
!= ANV_FENCE_TYPE_BO
)
1114 VkResult
anv_WaitForFences(
1116 uint32_t fenceCount
,
1117 const VkFence
* pFences
,
1121 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1123 if (anv_device_is_lost(device
))
1124 return VK_ERROR_DEVICE_LOST
;
1126 uint64_t abs_timeout
= anv_get_absolute_timeout(timeout
);
1127 if (anv_all_fences_syncobj(fenceCount
, pFences
)) {
1128 return anv_wait_for_syncobj_fences(device
, fenceCount
, pFences
,
1129 waitAll
, abs_timeout
);
1130 } else if (anv_all_fences_bo(fenceCount
, pFences
)) {
1131 return anv_wait_for_bo_fences(device
, fenceCount
, pFences
,
1132 waitAll
, abs_timeout
);
1134 return anv_wait_for_fences(device
, fenceCount
, pFences
,
1135 waitAll
, abs_timeout
);
1139 void anv_GetPhysicalDeviceExternalFenceProperties(
1140 VkPhysicalDevice physicalDevice
,
1141 const VkPhysicalDeviceExternalFenceInfo
* pExternalFenceInfo
,
1142 VkExternalFenceProperties
* pExternalFenceProperties
)
1144 ANV_FROM_HANDLE(anv_physical_device
, device
, physicalDevice
);
1146 switch (pExternalFenceInfo
->handleType
) {
1147 case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT
:
1148 case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT
:
1149 if (device
->has_syncobj_wait
) {
1150 pExternalFenceProperties
->exportFromImportedHandleTypes
=
1151 VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT
|
1152 VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT
;
1153 pExternalFenceProperties
->compatibleHandleTypes
=
1154 VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT
|
1155 VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT
;
1156 pExternalFenceProperties
->externalFenceFeatures
=
1157 VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT
|
1158 VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT
;
1167 pExternalFenceProperties
->exportFromImportedHandleTypes
= 0;
1168 pExternalFenceProperties
->compatibleHandleTypes
= 0;
1169 pExternalFenceProperties
->externalFenceFeatures
= 0;
1172 VkResult
anv_ImportFenceFdKHR(
1174 const VkImportFenceFdInfoKHR
* pImportFenceFdInfo
)
1176 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1177 ANV_FROM_HANDLE(anv_fence
, fence
, pImportFenceFdInfo
->fence
);
1178 int fd
= pImportFenceFdInfo
->fd
;
1180 assert(pImportFenceFdInfo
->sType
==
1181 VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR
);
1183 struct anv_fence_impl new_impl
= {
1184 .type
= ANV_FENCE_TYPE_NONE
,
1187 switch (pImportFenceFdInfo
->handleType
) {
1188 case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT
:
1189 new_impl
.type
= ANV_FENCE_TYPE_SYNCOBJ
;
1191 new_impl
.syncobj
= anv_gem_syncobj_fd_to_handle(device
, fd
);
1192 if (!new_impl
.syncobj
)
1193 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE
);
1197 case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT
:
1198 /* Sync files are a bit tricky. Because we want to continue using the
1199 * syncobj implementation of WaitForFences, we don't use the sync file
1200 * directly but instead import it into a syncobj.
1202 new_impl
.type
= ANV_FENCE_TYPE_SYNCOBJ
;
1204 new_impl
.syncobj
= anv_gem_syncobj_create(device
, 0);
1205 if (!new_impl
.syncobj
)
1206 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1208 if (anv_gem_syncobj_import_sync_file(device
, new_impl
.syncobj
, fd
)) {
1209 anv_gem_syncobj_destroy(device
, new_impl
.syncobj
);
1210 return vk_errorf(device
->instance
, NULL
,
1211 VK_ERROR_INVALID_EXTERNAL_HANDLE
,
1212 "syncobj sync file import failed: %m");
1217 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE
);
1220 /* From the Vulkan 1.0.53 spec:
1222 * "Importing a fence payload from a file descriptor transfers
1223 * ownership of the file descriptor from the application to the
1224 * Vulkan implementation. The application must not perform any
1225 * operations on the file descriptor after a successful import."
1227 * If the import fails, we leave the file descriptor open.
1231 if (pImportFenceFdInfo
->flags
& VK_FENCE_IMPORT_TEMPORARY_BIT
) {
1232 anv_fence_impl_cleanup(device
, &fence
->temporary
);
1233 fence
->temporary
= new_impl
;
1235 anv_fence_impl_cleanup(device
, &fence
->permanent
);
1236 fence
->permanent
= new_impl
;
1242 VkResult
anv_GetFenceFdKHR(
1244 const VkFenceGetFdInfoKHR
* pGetFdInfo
,
1247 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1248 ANV_FROM_HANDLE(anv_fence
, fence
, pGetFdInfo
->fence
);
1250 assert(pGetFdInfo
->sType
== VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR
);
1252 struct anv_fence_impl
*impl
=
1253 fence
->temporary
.type
!= ANV_FENCE_TYPE_NONE
?
1254 &fence
->temporary
: &fence
->permanent
;
1256 assert(impl
->type
== ANV_FENCE_TYPE_SYNCOBJ
);
1257 switch (pGetFdInfo
->handleType
) {
1258 case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT
: {
1259 int fd
= anv_gem_syncobj_handle_to_fd(device
, impl
->syncobj
);
1261 return vk_error(VK_ERROR_TOO_MANY_OBJECTS
);
1267 case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT
: {
1268 int fd
= anv_gem_syncobj_export_sync_file(device
, impl
->syncobj
);
1270 return vk_error(VK_ERROR_TOO_MANY_OBJECTS
);
1277 unreachable("Invalid fence export handle type");
1280 /* From the Vulkan 1.0.53 spec:
1282 * "Export operations have the same transference as the specified handle
1283 * type’s import operations. [...] If the fence was using a
1284 * temporarily imported payload, the fence’s prior permanent payload
1287 if (impl
== &fence
->temporary
)
1288 anv_fence_impl_cleanup(device
, impl
);
1293 // Queue semaphore functions
1295 VkResult
anv_CreateSemaphore(
1297 const VkSemaphoreCreateInfo
* pCreateInfo
,
1298 const VkAllocationCallbacks
* pAllocator
,
1299 VkSemaphore
* pSemaphore
)
1301 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1302 struct anv_semaphore
*semaphore
;
1304 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO
);
1306 semaphore
= vk_alloc(&device
->alloc
, sizeof(*semaphore
), 8,
1307 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE
);
1308 if (semaphore
== NULL
)
1309 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1311 p_atomic_set(&semaphore
->refcount
, 1);
1313 const VkExportSemaphoreCreateInfo
*export
=
1314 vk_find_struct_const(pCreateInfo
->pNext
, EXPORT_SEMAPHORE_CREATE_INFO
);
1315 VkExternalSemaphoreHandleTypeFlags handleTypes
=
1316 export
? export
->handleTypes
: 0;
1318 if (handleTypes
== 0) {
1319 /* The DRM execbuffer ioctl always execute in-oder so long as you stay
1320 * on the same ring. Since we don't expose the blit engine as a DMA
1321 * queue, a dummy no-op semaphore is a perfectly valid implementation.
1323 semaphore
->permanent
.type
= ANV_SEMAPHORE_TYPE_DUMMY
;
1324 } else if (handleTypes
& VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT
) {
1325 assert(handleTypes
== VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT
);
1326 if (device
->instance
->physicalDevice
.has_syncobj
) {
1327 semaphore
->permanent
.type
= ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ
;
1328 semaphore
->permanent
.syncobj
= anv_gem_syncobj_create(device
, 0);
1329 if (!semaphore
->permanent
.syncobj
) {
1330 vk_free2(&device
->alloc
, pAllocator
, semaphore
);
1331 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1334 semaphore
->permanent
.type
= ANV_SEMAPHORE_TYPE_BO
;
1335 VkResult result
= anv_device_alloc_bo(device
, 4096,
1336 ANV_BO_ALLOC_EXTERNAL
|
1337 ANV_BO_ALLOC_IMPLICIT_SYNC
,
1338 &semaphore
->permanent
.bo
);
1339 if (result
!= VK_SUCCESS
) {
1340 vk_free2(&device
->alloc
, pAllocator
, semaphore
);
1344 /* If we're going to use this as a fence, we need to *not* have the
1345 * EXEC_OBJECT_ASYNC bit set.
1347 assert(!(semaphore
->permanent
.bo
->flags
& EXEC_OBJECT_ASYNC
));
1349 } else if (handleTypes
& VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT
) {
1350 assert(handleTypes
== VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT
);
1351 if (device
->instance
->physicalDevice
.has_syncobj
) {
1352 semaphore
->permanent
.type
= ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ
;
1353 semaphore
->permanent
.syncobj
= anv_gem_syncobj_create(device
, 0);
1355 semaphore
->permanent
.type
= ANV_SEMAPHORE_TYPE_SYNC_FILE
;
1356 semaphore
->permanent
.fd
= -1;
1359 assert(!"Unknown handle type");
1360 vk_free2(&device
->alloc
, pAllocator
, semaphore
);
1361 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE
);
1364 semaphore
->temporary
.type
= ANV_SEMAPHORE_TYPE_NONE
;
1366 *pSemaphore
= anv_semaphore_to_handle(semaphore
);
1372 anv_semaphore_impl_cleanup(struct anv_device
*device
,
1373 struct anv_semaphore_impl
*impl
)
1375 switch (impl
->type
) {
1376 case ANV_SEMAPHORE_TYPE_NONE
:
1377 case ANV_SEMAPHORE_TYPE_DUMMY
:
1378 /* Dummy. Nothing to do */
1381 case ANV_SEMAPHORE_TYPE_BO
:
1382 anv_device_release_bo(device
, impl
->bo
);
1385 case ANV_SEMAPHORE_TYPE_SYNC_FILE
:
1389 case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ
:
1390 anv_gem_syncobj_destroy(device
, impl
->syncobj
);
1394 unreachable("Invalid semaphore type");
1397 impl
->type
= ANV_SEMAPHORE_TYPE_NONE
;
1401 anv_semaphore_reset_temporary(struct anv_device
*device
,
1402 struct anv_semaphore
*semaphore
)
1404 if (semaphore
->temporary
.type
== ANV_SEMAPHORE_TYPE_NONE
)
1407 anv_semaphore_impl_cleanup(device
, &semaphore
->temporary
);
1410 static struct anv_semaphore
*
1411 anv_semaphore_ref(struct anv_semaphore
*semaphore
)
1413 assert(semaphore
->refcount
);
1414 p_atomic_inc(&semaphore
->refcount
);
1419 anv_semaphore_unref(struct anv_device
*device
, struct anv_semaphore
*semaphore
)
1421 if (!p_atomic_dec_zero(&semaphore
->refcount
))
1424 anv_semaphore_impl_cleanup(device
, &semaphore
->temporary
);
1425 anv_semaphore_impl_cleanup(device
, &semaphore
->permanent
);
1426 vk_free(&device
->alloc
, semaphore
);
1429 void anv_DestroySemaphore(
1431 VkSemaphore _semaphore
,
1432 const VkAllocationCallbacks
* pAllocator
)
1434 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1435 ANV_FROM_HANDLE(anv_semaphore
, semaphore
, _semaphore
);
1437 if (semaphore
== NULL
)
1440 anv_semaphore_unref(device
, semaphore
);
1443 void anv_GetPhysicalDeviceExternalSemaphoreProperties(
1444 VkPhysicalDevice physicalDevice
,
1445 const VkPhysicalDeviceExternalSemaphoreInfo
* pExternalSemaphoreInfo
,
1446 VkExternalSemaphoreProperties
* pExternalSemaphoreProperties
)
1448 ANV_FROM_HANDLE(anv_physical_device
, device
, physicalDevice
);
1450 switch (pExternalSemaphoreInfo
->handleType
) {
1451 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT
:
1452 pExternalSemaphoreProperties
->exportFromImportedHandleTypes
=
1453 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT
;
1454 pExternalSemaphoreProperties
->compatibleHandleTypes
=
1455 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT
;
1456 pExternalSemaphoreProperties
->externalSemaphoreFeatures
=
1457 VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT
|
1458 VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT
;
1461 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT
:
1462 if (device
->has_exec_fence
) {
1463 pExternalSemaphoreProperties
->exportFromImportedHandleTypes
=
1464 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT
;
1465 pExternalSemaphoreProperties
->compatibleHandleTypes
=
1466 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT
;
1467 pExternalSemaphoreProperties
->externalSemaphoreFeatures
=
1468 VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT
|
1469 VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT
;
1478 pExternalSemaphoreProperties
->exportFromImportedHandleTypes
= 0;
1479 pExternalSemaphoreProperties
->compatibleHandleTypes
= 0;
1480 pExternalSemaphoreProperties
->externalSemaphoreFeatures
= 0;
1483 VkResult
anv_ImportSemaphoreFdKHR(
1485 const VkImportSemaphoreFdInfoKHR
* pImportSemaphoreFdInfo
)
1487 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1488 ANV_FROM_HANDLE(anv_semaphore
, semaphore
, pImportSemaphoreFdInfo
->semaphore
);
1489 int fd
= pImportSemaphoreFdInfo
->fd
;
1491 struct anv_semaphore_impl new_impl
= {
1492 .type
= ANV_SEMAPHORE_TYPE_NONE
,
1495 switch (pImportSemaphoreFdInfo
->handleType
) {
1496 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT
:
1497 if (device
->instance
->physicalDevice
.has_syncobj
) {
1498 new_impl
.type
= ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ
;
1500 new_impl
.syncobj
= anv_gem_syncobj_fd_to_handle(device
, fd
);
1501 if (!new_impl
.syncobj
)
1502 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE
);
1504 new_impl
.type
= ANV_SEMAPHORE_TYPE_BO
;
1506 VkResult result
= anv_device_import_bo(device
, fd
,
1507 ANV_BO_ALLOC_EXTERNAL
|
1508 ANV_BO_ALLOC_IMPLICIT_SYNC
,
1510 if (result
!= VK_SUCCESS
)
1513 if (new_impl
.bo
->size
< 4096) {
1514 anv_device_release_bo(device
, new_impl
.bo
);
1515 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE
);
1518 /* If we're going to use this as a fence, we need to *not* have the
1519 * EXEC_OBJECT_ASYNC bit set.
1521 assert(!(new_impl
.bo
->flags
& EXEC_OBJECT_ASYNC
));
1524 /* From the Vulkan spec:
1526 * "Importing semaphore state from a file descriptor transfers
1527 * ownership of the file descriptor from the application to the
1528 * Vulkan implementation. The application must not perform any
1529 * operations on the file descriptor after a successful import."
1531 * If the import fails, we leave the file descriptor open.
1536 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT
:
1537 if (device
->instance
->physicalDevice
.has_syncobj
) {
1538 new_impl
= (struct anv_semaphore_impl
) {
1539 .type
= ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ
,
1540 .syncobj
= anv_gem_syncobj_create(device
, 0),
1542 if (!new_impl
.syncobj
)
1543 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1544 if (anv_gem_syncobj_import_sync_file(device
, new_impl
.syncobj
, fd
)) {
1545 anv_gem_syncobj_destroy(device
, new_impl
.syncobj
);
1546 return vk_errorf(device
->instance
, NULL
,
1547 VK_ERROR_INVALID_EXTERNAL_HANDLE
,
1548 "syncobj sync file import failed: %m");
1550 /* Ownership of the FD is transfered to Anv. Since we don't need it
1551 * anymore because the associated fence has been put into a syncobj,
1552 * we must close the FD.
1556 new_impl
= (struct anv_semaphore_impl
) {
1557 .type
= ANV_SEMAPHORE_TYPE_SYNC_FILE
,
1564 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE
);
1567 if (pImportSemaphoreFdInfo
->flags
& VK_SEMAPHORE_IMPORT_TEMPORARY_BIT
) {
1568 anv_semaphore_impl_cleanup(device
, &semaphore
->temporary
);
1569 semaphore
->temporary
= new_impl
;
1571 anv_semaphore_impl_cleanup(device
, &semaphore
->permanent
);
1572 semaphore
->permanent
= new_impl
;
1578 VkResult
anv_GetSemaphoreFdKHR(
1580 const VkSemaphoreGetFdInfoKHR
* pGetFdInfo
,
1583 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1584 ANV_FROM_HANDLE(anv_semaphore
, semaphore
, pGetFdInfo
->semaphore
);
1588 assert(pGetFdInfo
->sType
== VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR
);
1590 struct anv_semaphore_impl
*impl
=
1591 semaphore
->temporary
.type
!= ANV_SEMAPHORE_TYPE_NONE
?
1592 &semaphore
->temporary
: &semaphore
->permanent
;
1594 switch (impl
->type
) {
1595 case ANV_SEMAPHORE_TYPE_BO
:
1596 result
= anv_device_export_bo(device
, impl
->bo
, pFd
);
1597 if (result
!= VK_SUCCESS
)
1601 case ANV_SEMAPHORE_TYPE_SYNC_FILE
: {
1602 /* There's a potential race here with vkQueueSubmit if you are trying
1603 * to export a semaphore Fd while the queue submit is still happening.
1604 * This can happen if we see all dependencies get resolved via timeline
1605 * semaphore waits completing before the execbuf completes and we
1606 * process the resulting out fence. To work around this, take a lock
1607 * around grabbing the fd.
1609 pthread_mutex_lock(&device
->mutex
);
1611 /* From the Vulkan 1.0.53 spec:
1613 * "...exporting a semaphore payload to a handle with copy
1614 * transference has the same side effects on the source
1615 * semaphore’s payload as executing a semaphore wait operation."
1617 * In other words, it may still be a SYNC_FD semaphore, but it's now
1618 * considered to have been waited on and no longer has a sync file
1624 pthread_mutex_unlock(&device
->mutex
);
1626 /* There are two reasons why this could happen:
1628 * 1) The user is trying to export without submitting something that
1629 * signals the semaphore. If this is the case, it's their bug so
1630 * what we return here doesn't matter.
1632 * 2) The kernel didn't give us a file descriptor. The most likely
1633 * reason for this is running out of file descriptors.
1636 return vk_error(VK_ERROR_TOO_MANY_OBJECTS
);
1642 case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ
:
1643 if (pGetFdInfo
->handleType
== VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT
)
1644 fd
= anv_gem_syncobj_export_sync_file(device
, impl
->syncobj
);
1646 assert(pGetFdInfo
->handleType
== VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT
);
1647 fd
= anv_gem_syncobj_handle_to_fd(device
, impl
->syncobj
);
1650 return vk_error(VK_ERROR_TOO_MANY_OBJECTS
);
1655 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE
);
1658 /* From the Vulkan 1.0.53 spec:
1660 * "Export operations have the same transference as the specified handle
1661 * type’s import operations. [...] If the semaphore was using a
1662 * temporarily imported payload, the semaphore’s prior permanent payload
1665 if (impl
== &semaphore
->temporary
)
1666 anv_semaphore_impl_cleanup(device
, impl
);