/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * This file implements VkQueue, VkFence, and VkSemaphore
 */

#include <unistd.h>
#include <sys/eventfd.h>

#include "anv_private.h"
#include "vk_util.h"

#include "genxml/gen7_pack.h"

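/* Thin wrapper around the execbuffer2 ioctl: submit the batch and, on
 * success, copy the kernel-assigned offsets back into our anv_bo structs so
 * they can be used as presumed offsets on future submissions.
 */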
VkResult
anv_device_execbuf(struct anv_device *device,
                   struct drm_i915_gem_execbuffer2 *execbuf,
                   struct anv_bo **execbuf_bos)
{
   int ret = anv_gem_execbuffer(device, execbuf);
   if (ret != 0) {
      /* We don't know the real error. */
      device->lost = true;
      return vk_errorf(VK_ERROR_DEVICE_LOST, "execbuf2 failed: %m");
   }

   struct drm_i915_gem_exec_object2 *objects =
      (void *)(uintptr_t)execbuf->buffers_ptr;
   for (uint32_t k = 0; k < execbuf->buffer_count; k++)
      execbuf_bos[k]->offset = objects[k].offset;

   return VK_SUCCESS;
}

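/* Copy the given batch into a fresh BO from the batch BO pool, submit it on
 * the render ring, and block until the kernel signals completion.
 */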
VkResult
anv_device_submit_simple_batch(struct anv_device *device,
                               struct anv_batch *batch)
{
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   struct anv_bo bo, *exec_bos[1];
   VkResult result = VK_SUCCESS;
   uint32_t size;

   /* Kernel driver requires 8 byte aligned batch length */
   size = align_u32(batch->next - batch->start, 8);
   result = anv_bo_pool_alloc(&device->batch_bo_pool, &bo, size);
   if (result != VK_SUCCESS)
      return result;

   memcpy(bo.map, batch->start, size);
   if (!device->info.has_llc)
      gen_flush_range(bo.map, size);

   exec_bos[0] = &bo;
   exec2_objects[0].handle = bo.gem_handle;
   exec2_objects[0].relocation_count = 0;
   exec2_objects[0].relocs_ptr = 0;
   exec2_objects[0].alignment = 0;
   exec2_objects[0].offset = bo.offset;
   exec2_objects[0].flags = 0;
   exec2_objects[0].rsvd1 = 0;
   exec2_objects[0].rsvd2 = 0;

   execbuf.buffers_ptr = (uintptr_t) exec2_objects;
   execbuf.buffer_count = 1;
   execbuf.batch_start_offset = 0;
   execbuf.batch_len = size;
   execbuf.cliprects_ptr = 0;
   execbuf.num_cliprects = 0;
   execbuf.DR1 = 0;
   execbuf.DR4 = 0;

   execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   execbuf.rsvd1 = device->context_id;
   execbuf.rsvd2 = 0;

   result = anv_device_execbuf(device, &execbuf, exec_bos);
   if (result != VK_SUCCESS)
      goto fail;

   result = anv_device_wait(device, &bo, INT64_MAX);

fail:
   anv_bo_pool_free(&device->batch_bo_pool, &bo);

   return result;
}

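/* vkQueueSubmit: every command buffer becomes its own execbuf.  Wait
 * semaphores are attached to the first execbuf of a submit, signal
 * semaphores and the fence to the last one, and empty submits get a dummy
 * batch so GEM still has something to wait on.
 */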
VkResult anv_QueueSubmit(
    VkQueue                                     _queue,
    uint32_t                                    submitCount,
    const VkSubmitInfo*                         pSubmits,
    VkFence                                     fence)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);
   struct anv_device *device = queue->device;

   /* Query for device status prior to submitting.  Technically, we don't need
    * to do this.  However, if we have a client that's submitting piles of
    * garbage, we would rather break as early as possible to keep the GPU
    * hanging contained.  If we don't check here, we'll either be waiting for
    * the kernel to kick us or we'll have to wait until the client waits on a
    * fence before we actually know whether or not we've hung.
    */
   VkResult result = anv_device_query_status(device);
   if (result != VK_SUCCESS)
      return result;

   /* We lock around QueueSubmit for three main reasons:
    *
    *  1) When a block pool is resized, we create a new gem handle with a
    *     different size and, in the case of surface states, possibly a
    *     different center offset but we re-use the same anv_bo struct when
    *     we do so.  If this happens in the middle of setting up an execbuf,
    *     we could end up with our list of BOs out of sync with our list of
    *     gem handles.
    *
    *  2) The algorithm we use for building the list of unique buffers isn't
    *     thread-safe.  While the client is supposed to synchronize around
    *     QueueSubmit, this would be extremely difficult to debug if it ever
    *     came up in the wild due to a broken app.  It's better to play it
    *     safe and just lock around QueueSubmit.
    *
    *  3) The anv_cmd_buffer_execbuf function may perform relocations in
    *     userspace.  Due to the fact that the surface state buffer is shared
    *     between batches, we can't afford to have that happen from multiple
    *     threads at the same time.  Even though the user is supposed to
    *     ensure this doesn't happen, we play it safe as in (2) above.
    *
    * Since the only other things that ever take the device lock, such as
    * block pool resizes, happen rarely, the lock is almost never contended
    * and taking it isn't really an expensive operation in this case.
    */
   pthread_mutex_lock(&device->mutex);

   if (fence && submitCount == 0) {
      /* If we don't have any command buffers, we need to submit a dummy
       * batch to give GEM something to wait on.  We could, potentially,
       * come up with something more efficient but this shouldn't be a
       * common case.
       */
      result = anv_cmd_buffer_execbuf(device, NULL, NULL, 0, NULL, 0, fence);
      goto out;
   }

   for (uint32_t i = 0; i < submitCount; i++) {
      /* Fence for this submit.  NULL for all but the last one */
      VkFence submit_fence = (i == submitCount - 1) ? fence : NULL;

      if (pSubmits[i].commandBufferCount == 0) {
         /* If we don't have any command buffers, we need to submit a dummy
          * batch to give GEM something to wait on.  We could, potentially,
          * come up with something more efficient but this shouldn't be a
          * common case.
          */
         result = anv_cmd_buffer_execbuf(device, NULL,
                                         pSubmits[i].pWaitSemaphores,
                                         pSubmits[i].waitSemaphoreCount,
                                         pSubmits[i].pSignalSemaphores,
                                         pSubmits[i].signalSemaphoreCount,
                                         submit_fence);
         if (result != VK_SUCCESS)
            goto out;

         continue;
      }

      for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
         ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer,
                         pSubmits[i].pCommandBuffers[j]);
         assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
         assert(!anv_batch_has_error(&cmd_buffer->batch));

         /* Fence for this execbuf.  NULL for all but the last one */
         VkFence execbuf_fence =
            (j == pSubmits[i].commandBufferCount - 1) ? submit_fence : NULL;

         const VkSemaphore *in_semaphores = NULL, *out_semaphores = NULL;
         uint32_t num_in_semaphores = 0, num_out_semaphores = 0;
         if (j == 0) {
            /* Only the first batch gets the in semaphores */
            in_semaphores = pSubmits[i].pWaitSemaphores;
            num_in_semaphores = pSubmits[i].waitSemaphoreCount;
         }

         if (j == pSubmits[i].commandBufferCount - 1) {
            /* Only the last batch gets the out semaphores */
            out_semaphores = pSubmits[i].pSignalSemaphores;
            num_out_semaphores = pSubmits[i].signalSemaphoreCount;
         }

         result = anv_cmd_buffer_execbuf(device, cmd_buffer,
                                         in_semaphores, num_in_semaphores,
                                         out_semaphores, num_out_semaphores,
                                         execbuf_fence);
         if (result != VK_SUCCESS)
            goto out;
      }
   }

   pthread_cond_broadcast(&device->queue_submit);

out:
   if (result != VK_SUCCESS) {
      /* In the case that something has gone wrong we may end up with an
       * inconsistent state from which it may not be trivial to recover.
       * For example, we might have computed address relocations and
       * any future attempt to re-submit this job will need to know about
       * this and avoid computing relocation addresses again.
       *
       * To avoid this sort of issue, we assume that if something went
       * wrong during submission we must already be in a really bad situation
       * anyway (such as being out of memory) and return
       * VK_ERROR_DEVICE_LOST to ensure that clients do not attempt to
       * submit the same job again to this device.
       */
      result = vk_errorf(VK_ERROR_DEVICE_LOST, "vkQueueSubmit() failed");
      device->lost = true;
   }

   pthread_mutex_unlock(&device->mutex);

   return result;
}

VkResult anv_QueueWaitIdle(
    VkQueue                                     _queue)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);

   return anv_DeviceWaitIdle(anv_device_to_handle(queue->device));
}

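/* Fences are backed by a 4k BO from the batch BO pool: the anv_fence struct
 * itself lives in the BO's map, and GEM busy/wait queries on that BO drive
 * the status and wait entry points below.
 */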
VkResult anv_CreateFence(
    VkDevice                                    _device,
    const VkFenceCreateInfo*                    pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkFence*                                    pFence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_bo fence_bo;
   struct anv_fence *fence;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);

   VkResult result = anv_bo_pool_alloc(&device->batch_bo_pool, &fence_bo, 4096);
   if (result != VK_SUCCESS)
      return result;

   /* Fences are small.  Just store the CPU data structure in the BO. */
   fence = fence_bo.map;
   fence->bo = fence_bo;

   if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
      fence->state = ANV_FENCE_STATE_SIGNALED;
   } else {
      fence->state = ANV_FENCE_STATE_RESET;
   }

   *pFence = anv_fence_to_handle(fence);

   return VK_SUCCESS;
}

void anv_DestroyFence(
    VkDevice                                    _device,
    VkFence                                     _fence,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (!fence)
      return;

   assert(fence->bo.map == fence);
   anv_bo_pool_free(&device->batch_bo_pool, &fence->bo);
}

VkResult anv_ResetFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences)
{
   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      fence->state = ANV_FENCE_STATE_RESET;
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceStatus(
    VkDevice                                    _device,
    VkFence                                     _fence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (unlikely(device->lost))
      return VK_ERROR_DEVICE_LOST;

   switch (fence->state) {
   case ANV_FENCE_STATE_RESET:
      /* If it hasn't even been sent off to the GPU yet, it's not ready */
      return VK_NOT_READY;

   case ANV_FENCE_STATE_SIGNALED:
      /* It's been signaled, return success */
      return VK_SUCCESS;

   case ANV_FENCE_STATE_SUBMITTED: {
      VkResult result = anv_device_bo_busy(device, &fence->bo);
      if (result == VK_SUCCESS) {
         fence->state = ANV_FENCE_STATE_SIGNALED;
         return VK_SUCCESS;
      } else {
         return result;
      }
   }
   default:
      unreachable("Invalid fence status");
   }
}

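/* INT_TYPE_MAX(type) evaluates to the maximum value of a *signed* integral
 * type, e.g. (1ull << 31) - 1 == INT32_MAX for a 32-bit tv_sec.
 */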
#define NSEC_PER_SEC 1000000000
#define INT_TYPE_MAX(type) ((1ull << (sizeof(type) * 8 - 1)) - 1)

VkResult anv_WaitForFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences,
    VkBool32                                    waitAll,
    uint64_t                                    _timeout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   int ret;

   if (unlikely(device->lost))
      return VK_ERROR_DEVICE_LOST;

   /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is supposed
    * to block indefinitely for timeouts <= 0.  Unfortunately, this was broken
    * for a couple of kernel releases.  Since there's no way to know
    * whether or not the kernel we're using is one of the broken ones, the
    * best we can do is to clamp the timeout to INT64_MAX.  This limits the
    * maximum timeout from 584 years to 292 years - likely not a big deal.
    */
   int64_t timeout = MIN2(_timeout, INT64_MAX);

   VkResult result = VK_SUCCESS;
   uint32_t pending_fences = fenceCount;
   while (pending_fences) {
      pending_fences = 0;
      bool signaled_fences = false;
      for (uint32_t i = 0; i < fenceCount; i++) {
         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
         switch (fence->state) {
         case ANV_FENCE_STATE_RESET:
            /* This fence hasn't been submitted yet, we'll catch it the next
             * time around.  Yes, this may mean we dead-loop but, short of
             * lots of locking and a condition variable, there's not much that
             * we can do about that.
             */
            pending_fences++;
            continue;

         case ANV_FENCE_STATE_SIGNALED:
            /* This fence is not pending.  If waitAll isn't set, we can return
             * early.  Otherwise, we have to keep going.
             */
            if (!waitAll) {
               result = VK_SUCCESS;
               goto done;
            }
            continue;

         case ANV_FENCE_STATE_SUBMITTED:
            /* These are the fences we really care about.  Go ahead and wait
             * on it until we hit a timeout.
             */
            result = anv_device_wait(device, &fence->bo, timeout);
            switch (result) {
            case VK_SUCCESS:
               fence->state = ANV_FENCE_STATE_SIGNALED;
               signaled_fences = true;
               if (!waitAll)
                  goto done;
               break;

            case VK_TIMEOUT:
               goto done;

            default:
               return result;
            }
         }
      }

      if (pending_fences && !signaled_fences) {
         /* If we've hit this then someone decided to vkWaitForFences before
          * they've actually submitted any of them to a queue.  This is a
          * fairly pessimal case, so it's ok to lock here and use a standard
          * pthreads condition variable.
          */
         pthread_mutex_lock(&device->mutex);

         /* It's possible that some of the fences have changed state since the
          * last time we checked.  Now that we have the lock, check for
          * pending fences again and don't wait if it's changed.
          */
         uint32_t now_pending_fences = 0;
         for (uint32_t i = 0; i < fenceCount; i++) {
            ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
            if (fence->state == ANV_FENCE_STATE_RESET)
               now_pending_fences++;
         }
         assert(now_pending_fences <= pending_fences);

         if (now_pending_fences == pending_fences) {
            struct timespec before;
            clock_gettime(CLOCK_MONOTONIC, &before);

            uint32_t abs_nsec = before.tv_nsec + timeout % NSEC_PER_SEC;
            uint64_t abs_sec = before.tv_sec + (abs_nsec / NSEC_PER_SEC) +
                               (timeout / NSEC_PER_SEC);
            abs_nsec %= NSEC_PER_SEC;

            /* Avoid roll-over in tv_sec on 32-bit systems if the user
             * provided timeout is UINT64_MAX
             */
            struct timespec abstime;
            abstime.tv_nsec = abs_nsec;
            abstime.tv_sec = MIN2(abs_sec, INT_TYPE_MAX(abstime.tv_sec));

            ret = pthread_cond_timedwait(&device->queue_submit,
                                         &device->mutex, &abstime);
            assert(ret != EINVAL);

            struct timespec after;
            clock_gettime(CLOCK_MONOTONIC, &after);
            uint64_t time_elapsed =
               ((uint64_t)after.tv_sec * NSEC_PER_SEC + after.tv_nsec) -
               ((uint64_t)before.tv_sec * NSEC_PER_SEC + before.tv_nsec);

            if (time_elapsed >= timeout) {
               pthread_mutex_unlock(&device->mutex);
               result = VK_TIMEOUT;
               goto done;
            }

            timeout -= time_elapsed;
         }

         pthread_mutex_unlock(&device->mutex);
      }
   }

done:
   if (unlikely(device->lost))
      return VK_ERROR_DEVICE_LOST;

   return result;
}

// Queue semaphore functions

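/* Each anv_semaphore carries a permanent payload plus an optional temporary
 * one installed by VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR imports; whenever a
 * temporary payload is present it takes precedence and is cleaned up once
 * consumed (see anv_GetSemaphoreFdKHR below).
 */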
VkResult anv_CreateSemaphore(
    VkDevice                                    _device,
    const VkSemaphoreCreateInfo*                pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkSemaphore*                                pSemaphore)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_semaphore *semaphore;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO);

   semaphore = vk_alloc2(&device->alloc, pAllocator, sizeof(*semaphore), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (semaphore == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   const VkExportSemaphoreCreateInfoKHR *export =
      vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO_KHR);
   VkExternalSemaphoreHandleTypeFlagsKHR handleTypes =
      export ? export->handleTypes : 0;

   if (handleTypes == 0) {
      /* The DRM execbuffer ioctl always executes in-order so long as you stay
       * on the same ring.  Since we don't expose the blit engine as a DMA
       * queue, a dummy no-op semaphore is a perfectly valid implementation.
       */
      semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DUMMY;
   } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR) {
      assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
      if (device->instance->physicalDevice.has_syncobj) {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
         semaphore->permanent.syncobj = anv_gem_syncobj_create(device);
         if (!semaphore->permanent.syncobj) {
            vk_free2(&device->alloc, pAllocator, semaphore);
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         }
      } else {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_BO;
         VkResult result = anv_bo_cache_alloc(device, &device->bo_cache,
                                              4096, &semaphore->permanent.bo);
         if (result != VK_SUCCESS) {
            vk_free2(&device->alloc, pAllocator, semaphore);
            return result;
         }

         /* If we're going to use this as a fence, we need to *not* have the
          * EXEC_OBJECT_ASYNC bit set.
          */
         assert(!(semaphore->permanent.bo->flags & EXEC_OBJECT_ASYNC));
      }
   } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR) {
      assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR);

      semaphore->permanent.type = ANV_SEMAPHORE_TYPE_SYNC_FILE;
      semaphore->permanent.fd = -1;
   } else {
      assert(!"Unknown handle type");
      vk_free2(&device->alloc, pAllocator, semaphore);
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
   }

   semaphore->temporary.type = ANV_SEMAPHORE_TYPE_NONE;

   *pSemaphore = anv_semaphore_to_handle(semaphore);

   return VK_SUCCESS;
}

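/* Release whatever kernel object (BO, sync file, or syncobj) backs a
 * semaphore payload.
 */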
static void
anv_semaphore_impl_cleanup(struct anv_device *device,
                           struct anv_semaphore_impl *impl)
{
   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_NONE:
   case ANV_SEMAPHORE_TYPE_DUMMY:
      /* Dummy.  Nothing to do */
      return;

   case ANV_SEMAPHORE_TYPE_BO:
      anv_bo_cache_release(device, &device->bo_cache, impl->bo);
      return;

   case ANV_SEMAPHORE_TYPE_SYNC_FILE:
      close(impl->fd);
      return;

   case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
      anv_gem_syncobj_destroy(device, impl->syncobj);
      return;
   }

   unreachable("Invalid semaphore type");
}

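/* Drop the temporary payload, if any, so the permanent payload becomes the
 * semaphore's active payload again.
 */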
static void
anv_semaphore_reset_temporary(struct anv_device *device,
                              struct anv_semaphore *semaphore)
{
   if (semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE)
      return;

   anv_semaphore_impl_cleanup(device, &semaphore->temporary);
   semaphore->temporary.type = ANV_SEMAPHORE_TYPE_NONE;
}

void anv_DestroySemaphore(
    VkDevice                                    _device,
    VkSemaphore                                 _semaphore,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, _semaphore);

   if (semaphore == NULL)
      return;

   anv_semaphore_impl_cleanup(device, &semaphore->temporary);
   anv_semaphore_impl_cleanup(device, &semaphore->permanent);

   vk_free2(&device->alloc, pAllocator, semaphore);
}

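/* Report which external semaphore handle types this physical device can
 * import and export; sync-FD support additionally requires kernel execbuf
 * fence support (has_exec_fence).
 */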
void anv_GetPhysicalDeviceExternalSemaphorePropertiesKHR(
    VkPhysicalDevice                            physicalDevice,
    const VkPhysicalDeviceExternalSemaphoreInfoKHR* pExternalSemaphoreInfo,
    VkExternalSemaphorePropertiesKHR*           pExternalSemaphoreProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   switch (pExternalSemaphoreInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
      pExternalSemaphoreProperties->exportFromImportedHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
      pExternalSemaphoreProperties->compatibleHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
      pExternalSemaphoreProperties->externalSemaphoreFeatures =
         VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
         VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
      return;

   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR:
      if (device->has_exec_fence) {
         pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
         pExternalSemaphoreProperties->compatibleHandleTypes =
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
         pExternalSemaphoreProperties->externalSemaphoreFeatures =
            VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
            VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
         return;
      }
      break;

   default:
      break;
   }

   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

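/* vkImportSemaphoreFdKHR: wrap the incoming fd in a payload of the matching
 * type, then install it as the temporary or permanent payload depending on
 * VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR.
 */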
VkResult anv_ImportSemaphoreFdKHR(
    VkDevice                                    _device,
    const VkImportSemaphoreFdInfoKHR*           pImportSemaphoreFdInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pImportSemaphoreFdInfo->semaphore);
   int fd = pImportSemaphoreFdInfo->fd;

   struct anv_semaphore_impl new_impl = {
      .type = ANV_SEMAPHORE_TYPE_NONE,
   };

   switch (pImportSemaphoreFdInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
      if (device->instance->physicalDevice.has_syncobj) {
         new_impl.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;

         new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
         if (!new_impl.syncobj)
            return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);

         /* From the Vulkan spec:
          *
          *    "Importing semaphore state from a file descriptor transfers
          *    ownership of the file descriptor from the application to the
          *    Vulkan implementation. The application must not perform any
          *    operations on the file descriptor after a successful import."
          *
          * If the import fails, we leave the file descriptor open.
          */
         close(pImportSemaphoreFdInfo->fd);
      } else {
         new_impl.type = ANV_SEMAPHORE_TYPE_BO;

         VkResult result = anv_bo_cache_import(device, &device->bo_cache,
                                               fd, 4096, &new_impl.bo);
         if (result != VK_SUCCESS)
            return result;

         /* If we're going to use this as a fence, we need to *not* have the
          * EXEC_OBJECT_ASYNC bit set.
          */
         assert(!(new_impl.bo->flags & EXEC_OBJECT_ASYNC));
      }
      break;

   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR:
      new_impl = (struct anv_semaphore_impl) {
         .type = ANV_SEMAPHORE_TYPE_SYNC_FILE,
         .fd = fd,
      };
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
   }

   if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR) {
      anv_semaphore_impl_cleanup(device, &semaphore->temporary);
      semaphore->temporary = new_impl;
   } else {
      anv_semaphore_impl_cleanup(device, &semaphore->permanent);
      semaphore->permanent = new_impl;
   }

   return VK_SUCCESS;
}

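/* vkGetSemaphoreFdKHR: export the currently active payload (temporary if one
 * is installed, permanent otherwise) as a file descriptor owned by the
 * caller.
 */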
VkResult anv_GetSemaphoreFdKHR(
    VkDevice                                    _device,
    const VkSemaphoreGetFdInfoKHR*              pGetFdInfo,
    int*                                        pFd)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pGetFdInfo->semaphore);
   VkResult result;
   int fd;

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR);

   struct anv_semaphore_impl *impl =
      semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
      &semaphore->temporary : &semaphore->permanent;

   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_BO:
      result = anv_bo_cache_export(device, &device->bo_cache, impl->bo, pFd);
      if (result != VK_SUCCESS)
         return result;
      break;

   case ANV_SEMAPHORE_TYPE_SYNC_FILE:
      /* There are two reasons why this could happen:
       *
       * 1) The user is trying to export without submitting something that
       *    signals the semaphore.  If this is the case, it's their bug so
       *    what we return here doesn't matter.
       *
       * 2) The kernel didn't give us a file descriptor.  The most likely
       *    reason for this is running out of file descriptors.
       */
      if (impl->fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = impl->fd;

      /* From the Vulkan 1.0.53 spec:
       *
       *    "...exporting a semaphore payload to a handle with copy
       *    transference has the same side effects on the source
       *    semaphore’s payload as executing a semaphore wait operation."
       *
       * In other words, it may still be a SYNC_FD semaphore, but it's now
       * considered to have been waited on and no longer has a sync file
       * attached.
       */
      impl->fd = -1;
      break;

   case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
      fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
      *pFd = fd;
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Export operations have the same transference as the specified handle
    *    type’s import operations. [...] If the semaphore was using a
    *    temporarily imported payload, the semaphore’s prior permanent payload
    *    will be restored."
    */
   if (impl == &semaphore->temporary)
      anv_semaphore_impl_cleanup(device, impl);

   return VK_SUCCESS;
}