2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * This file implements VkQueue, VkFence, and VkSemaphore
30 #include <sys/eventfd.h>
32 #include "anv_private.h"
35 #include "genxml/gen7_pack.h"
38 anv_device_execbuf(struct anv_device
*device
,
39 struct drm_i915_gem_execbuffer2
*execbuf
,
40 struct anv_bo
**execbuf_bos
)
42 int ret
= device
->no_hw
? 0 : anv_gem_execbuffer(device
, execbuf
);
44 /* We don't know the real error. */
46 return vk_errorf(device
->instance
, device
, VK_ERROR_DEVICE_LOST
,
47 "execbuf2 failed: %m");
50 struct drm_i915_gem_exec_object2
*objects
=
51 (void *)(uintptr_t)execbuf
->buffers_ptr
;
52 for (uint32_t k
= 0; k
< execbuf
->buffer_count
; k
++)
53 execbuf_bos
[k
]->offset
= objects
[k
].offset
;
59 anv_device_submit_simple_batch(struct anv_device
*device
,
60 struct anv_batch
*batch
)
62 struct drm_i915_gem_execbuffer2 execbuf
;
63 struct drm_i915_gem_exec_object2 exec2_objects
[1];
64 struct anv_bo bo
, *exec_bos
[1];
65 VkResult result
= VK_SUCCESS
;
68 /* Kernel driver requires 8 byte aligned batch length */
69 size
= align_u32(batch
->next
- batch
->start
, 8);
70 result
= anv_bo_pool_alloc(&device
->batch_bo_pool
, &bo
, size
);
71 if (result
!= VK_SUCCESS
)
74 memcpy(bo
.map
, batch
->start
, size
);
75 if (!device
->info
.has_llc
)
76 gen_flush_range(bo
.map
, size
);
79 exec2_objects
[0].handle
= bo
.gem_handle
;
80 exec2_objects
[0].relocation_count
= 0;
81 exec2_objects
[0].relocs_ptr
= 0;
82 exec2_objects
[0].alignment
= 0;
83 exec2_objects
[0].offset
= bo
.offset
;
84 exec2_objects
[0].flags
= 0;
85 exec2_objects
[0].rsvd1
= 0;
86 exec2_objects
[0].rsvd2
= 0;
88 execbuf
.buffers_ptr
= (uintptr_t) exec2_objects
;
89 execbuf
.buffer_count
= 1;
90 execbuf
.batch_start_offset
= 0;
91 execbuf
.batch_len
= size
;
92 execbuf
.cliprects_ptr
= 0;
93 execbuf
.num_cliprects
= 0;
98 I915_EXEC_HANDLE_LUT
| I915_EXEC_NO_RELOC
| I915_EXEC_RENDER
;
99 execbuf
.rsvd1
= device
->context_id
;
102 result
= anv_device_execbuf(device
, &execbuf
, exec_bos
);
103 if (result
!= VK_SUCCESS
)
106 result
= anv_device_wait(device
, &bo
, INT64_MAX
);
109 anv_bo_pool_free(&device
->batch_bo_pool
, &bo
);
114 VkResult
anv_QueueSubmit(
116 uint32_t submitCount
,
117 const VkSubmitInfo
* pSubmits
,
120 ANV_FROM_HANDLE(anv_queue
, queue
, _queue
);
121 struct anv_device
*device
= queue
->device
;
123 /* Query for device status prior to submitting. Technically, we don't need
124 * to do this. However, if we have a client that's submitting piles of
125 * garbage, we would rather break as early as possible to keep the GPU
126 * hanging contained. If we don't check here, we'll either be waiting for
127 * the kernel to kick us or we'll have to wait until the client waits on a
128 * fence before we actually know whether or not we've hung.
130 VkResult result
= anv_device_query_status(device
);
131 if (result
!= VK_SUCCESS
)
134 /* We lock around QueueSubmit for three main reasons:
136 * 1) When a block pool is resized, we create a new gem handle with a
137 * different size and, in the case of surface states, possibly a
138 * different center offset but we re-use the same anv_bo struct when
139 * we do so. If this happens in the middle of setting up an execbuf,
140 * we could end up with our list of BOs out of sync with our list of
143 * 2) The algorithm we use for building the list of unique buffers isn't
144 * thread-safe. While the client is supposed to syncronize around
145 * QueueSubmit, this would be extremely difficult to debug if it ever
146 * came up in the wild due to a broken app. It's better to play it
147 * safe and just lock around QueueSubmit.
149 * 3) The anv_cmd_buffer_execbuf function may perform relocations in
150 * userspace. Due to the fact that the surface state buffer is shared
151 * between batches, we can't afford to have that happen from multiple
152 * threads at the same time. Even though the user is supposed to
153 * ensure this doesn't happen, we play it safe as in (2) above.
155 * Since the only other things that ever take the device lock such as block
156 * pool resize only rarely happen, this will almost never be contended so
157 * taking a lock isn't really an expensive operation in this case.
159 pthread_mutex_lock(&device
->mutex
);
161 if (fence
&& submitCount
== 0) {
162 /* If we don't have any command buffers, we need to submit a dummy
163 * batch to give GEM something to wait on. We could, potentially,
164 * come up with something more efficient but this shouldn't be a
167 result
= anv_cmd_buffer_execbuf(device
, NULL
, NULL
, 0, NULL
, 0, fence
);
171 for (uint32_t i
= 0; i
< submitCount
; i
++) {
172 /* Fence for this submit. NULL for all but the last one */
173 VkFence submit_fence
= (i
== submitCount
- 1) ? fence
: VK_NULL_HANDLE
;
175 if (pSubmits
[i
].commandBufferCount
== 0) {
176 /* If we don't have any command buffers, we need to submit a dummy
177 * batch to give GEM something to wait on. We could, potentially,
178 * come up with something more efficient but this shouldn't be a
181 result
= anv_cmd_buffer_execbuf(device
, NULL
,
182 pSubmits
[i
].pWaitSemaphores
,
183 pSubmits
[i
].waitSemaphoreCount
,
184 pSubmits
[i
].pSignalSemaphores
,
185 pSubmits
[i
].signalSemaphoreCount
,
187 if (result
!= VK_SUCCESS
)
193 for (uint32_t j
= 0; j
< pSubmits
[i
].commandBufferCount
; j
++) {
194 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
,
195 pSubmits
[i
].pCommandBuffers
[j
]);
196 assert(cmd_buffer
->level
== VK_COMMAND_BUFFER_LEVEL_PRIMARY
);
197 assert(!anv_batch_has_error(&cmd_buffer
->batch
));
199 /* Fence for this execbuf. NULL for all but the last one */
200 VkFence execbuf_fence
=
201 (j
== pSubmits
[i
].commandBufferCount
- 1) ?
202 submit_fence
: VK_NULL_HANDLE
;
204 const VkSemaphore
*in_semaphores
= NULL
, *out_semaphores
= NULL
;
205 uint32_t num_in_semaphores
= 0, num_out_semaphores
= 0;
207 /* Only the first batch gets the in semaphores */
208 in_semaphores
= pSubmits
[i
].pWaitSemaphores
;
209 num_in_semaphores
= pSubmits
[i
].waitSemaphoreCount
;
212 if (j
== pSubmits
[i
].commandBufferCount
- 1) {
213 /* Only the last batch gets the out semaphores */
214 out_semaphores
= pSubmits
[i
].pSignalSemaphores
;
215 num_out_semaphores
= pSubmits
[i
].signalSemaphoreCount
;
218 result
= anv_cmd_buffer_execbuf(device
, cmd_buffer
,
219 in_semaphores
, num_in_semaphores
,
220 out_semaphores
, num_out_semaphores
,
222 if (result
!= VK_SUCCESS
)
227 pthread_cond_broadcast(&device
->queue_submit
);
230 if (result
!= VK_SUCCESS
) {
231 /* In the case that something has gone wrong we may end up with an
232 * inconsistent state from which it may not be trivial to recover.
233 * For example, we might have computed address relocations and
234 * any future attempt to re-submit this job will need to know about
235 * this and avoid computing relocation addresses again.
237 * To avoid this sort of issues, we assume that if something was
238 * wrong during submission we must already be in a really bad situation
239 * anyway (such us being out of memory) and return
240 * VK_ERROR_DEVICE_LOST to ensure that clients do not attempt to
241 * submit the same job again to this device.
243 result
= vk_errorf(device
->instance
, device
, VK_ERROR_DEVICE_LOST
,
244 "vkQueueSubmit() failed");
248 pthread_mutex_unlock(&device
->mutex
);
253 VkResult
anv_QueueWaitIdle(
256 ANV_FROM_HANDLE(anv_queue
, queue
, _queue
);
258 return anv_DeviceWaitIdle(anv_device_to_handle(queue
->device
));
261 VkResult
anv_CreateFence(
263 const VkFenceCreateInfo
* pCreateInfo
,
264 const VkAllocationCallbacks
* pAllocator
,
267 ANV_FROM_HANDLE(anv_device
, device
, _device
);
268 struct anv_fence
*fence
;
270 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_FENCE_CREATE_INFO
);
272 fence
= vk_zalloc2(&device
->alloc
, pAllocator
, sizeof(*fence
), 8,
273 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
275 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
277 if (device
->instance
->physicalDevice
.has_syncobj_wait
) {
278 fence
->permanent
.type
= ANV_FENCE_TYPE_SYNCOBJ
;
280 uint32_t create_flags
= 0;
281 if (pCreateInfo
->flags
& VK_FENCE_CREATE_SIGNALED_BIT
)
282 create_flags
|= DRM_SYNCOBJ_CREATE_SIGNALED
;
284 fence
->permanent
.syncobj
= anv_gem_syncobj_create(device
, create_flags
);
285 if (!fence
->permanent
.syncobj
)
286 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
288 fence
->permanent
.type
= ANV_FENCE_TYPE_BO
;
290 VkResult result
= anv_bo_pool_alloc(&device
->batch_bo_pool
,
291 &fence
->permanent
.bo
.bo
, 4096);
292 if (result
!= VK_SUCCESS
)
295 if (pCreateInfo
->flags
& VK_FENCE_CREATE_SIGNALED_BIT
) {
296 fence
->permanent
.bo
.state
= ANV_BO_FENCE_STATE_SIGNALED
;
298 fence
->permanent
.bo
.state
= ANV_BO_FENCE_STATE_RESET
;
302 *pFence
= anv_fence_to_handle(fence
);
308 anv_fence_impl_cleanup(struct anv_device
*device
,
309 struct anv_fence_impl
*impl
)
311 switch (impl
->type
) {
312 case ANV_FENCE_TYPE_NONE
:
313 /* Dummy. Nothing to do */
316 case ANV_FENCE_TYPE_BO
:
317 anv_bo_pool_free(&device
->batch_bo_pool
, &impl
->bo
.bo
);
320 case ANV_FENCE_TYPE_SYNCOBJ
:
321 anv_gem_syncobj_destroy(device
, impl
->syncobj
);
325 unreachable("Invalid fence type");
328 void anv_DestroyFence(
331 const VkAllocationCallbacks
* pAllocator
)
333 ANV_FROM_HANDLE(anv_device
, device
, _device
);
334 ANV_FROM_HANDLE(anv_fence
, fence
, _fence
);
339 anv_fence_impl_cleanup(device
, &fence
->temporary
);
340 anv_fence_impl_cleanup(device
, &fence
->permanent
);
342 vk_free2(&device
->alloc
, pAllocator
, fence
);
345 VkResult
anv_ResetFences(
348 const VkFence
* pFences
)
350 ANV_FROM_HANDLE(anv_device
, device
, _device
);
352 for (uint32_t i
= 0; i
< fenceCount
; i
++) {
353 ANV_FROM_HANDLE(anv_fence
, fence
, pFences
[i
]);
355 /* From the Vulkan 1.0.53 spec:
357 * "If any member of pFences currently has its payload imported with
358 * temporary permanence, that fence’s prior permanent payload is
359 * first restored. The remaining operations described therefore
360 * operate on the restored payload.
362 if (fence
->temporary
.type
!= ANV_FENCE_TYPE_NONE
) {
363 anv_fence_impl_cleanup(device
, &fence
->temporary
);
364 fence
->temporary
.type
= ANV_FENCE_TYPE_NONE
;
367 struct anv_fence_impl
*impl
= &fence
->permanent
;
369 switch (impl
->type
) {
370 case ANV_FENCE_TYPE_BO
:
371 impl
->bo
.state
= ANV_BO_FENCE_STATE_RESET
;
374 case ANV_FENCE_TYPE_SYNCOBJ
:
375 anv_gem_syncobj_reset(device
, impl
->syncobj
);
379 unreachable("Invalid fence type");
386 VkResult
anv_GetFenceStatus(
390 ANV_FROM_HANDLE(anv_device
, device
, _device
);
391 ANV_FROM_HANDLE(anv_fence
, fence
, _fence
);
393 if (unlikely(device
->lost
))
394 return VK_ERROR_DEVICE_LOST
;
396 struct anv_fence_impl
*impl
=
397 fence
->temporary
.type
!= ANV_FENCE_TYPE_NONE
?
398 &fence
->temporary
: &fence
->permanent
;
400 switch (impl
->type
) {
401 case ANV_FENCE_TYPE_BO
:
402 /* BO fences don't support import/export */
403 assert(fence
->temporary
.type
== ANV_FENCE_TYPE_NONE
);
404 switch (impl
->bo
.state
) {
405 case ANV_BO_FENCE_STATE_RESET
:
406 /* If it hasn't even been sent off to the GPU yet, it's not ready */
409 case ANV_BO_FENCE_STATE_SIGNALED
:
410 /* It's been signaled, return success */
413 case ANV_BO_FENCE_STATE_SUBMITTED
: {
414 VkResult result
= anv_device_bo_busy(device
, &impl
->bo
.bo
);
415 if (result
== VK_SUCCESS
) {
416 impl
->bo
.state
= ANV_BO_FENCE_STATE_SIGNALED
;
423 unreachable("Invalid fence status");
426 case ANV_FENCE_TYPE_SYNCOBJ
: {
427 int ret
= anv_gem_syncobj_wait(device
, &impl
->syncobj
, 1, 0, true);
429 if (errno
== ETIME
) {
432 /* We don't know the real error. */
434 return vk_errorf(device
->instance
, device
, VK_ERROR_DEVICE_LOST
,
435 "drm_syncobj_wait failed: %m");
443 unreachable("Invalid fence type");
#define NSEC_PER_SEC 1000000000

/* Largest value representable in a signed integer of the given type's width
 * (e.g. INT_TYPE_MAX(int32_t) == INT32_MAX).  Used to clamp timespec fields.
 */
#define INT_TYPE_MAX(type) ((1ull << (sizeof(type) * 8 - 1)) - 1)

/* Current CLOCK_MONOTONIC time in nanoseconds.  Monotonic so timeout math
 * is immune to wall-clock adjustments.
 * Fix: the source text had mojibake "¤t" where "&current" belongs.
 */
static uint64_t
gettime_ns(void)
{
   struct timespec current;
   clock_gettime(CLOCK_MONOTONIC, &current);
   return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
}
459 anv_wait_for_syncobj_fences(struct anv_device
*device
,
461 const VkFence
*pFences
,
465 uint32_t *syncobjs
= vk_zalloc(&device
->alloc
,
466 sizeof(*syncobjs
) * fenceCount
, 8,
467 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND
);
469 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
471 for (uint32_t i
= 0; i
< fenceCount
; i
++) {
472 ANV_FROM_HANDLE(anv_fence
, fence
, pFences
[i
]);
473 assert(fence
->permanent
.type
== ANV_FENCE_TYPE_SYNCOBJ
);
475 struct anv_fence_impl
*impl
=
476 fence
->temporary
.type
!= ANV_FENCE_TYPE_NONE
?
477 &fence
->temporary
: &fence
->permanent
;
479 assert(impl
->type
== ANV_FENCE_TYPE_SYNCOBJ
);
480 syncobjs
[i
] = impl
->syncobj
;
483 int64_t abs_timeout_ns
= 0;
485 uint64_t current_ns
= gettime_ns();
487 /* Add but saturate to INT32_MAX */
488 if (current_ns
+ _timeout
< current_ns
)
489 abs_timeout_ns
= INT64_MAX
;
490 else if (current_ns
+ _timeout
> INT64_MAX
)
491 abs_timeout_ns
= INT64_MAX
;
493 abs_timeout_ns
= current_ns
+ _timeout
;
496 /* The gem_syncobj_wait ioctl may return early due to an inherent
497 * limitation in the way it computes timeouts. Loop until we've actually
498 * passed the timeout.
502 ret
= anv_gem_syncobj_wait(device
, syncobjs
, fenceCount
,
503 abs_timeout_ns
, waitAll
);
504 } while (ret
== -1 && errno
== ETIME
&& gettime_ns() < abs_timeout_ns
);
506 vk_free(&device
->alloc
, syncobjs
);
509 if (errno
== ETIME
) {
512 /* We don't know the real error. */
514 return vk_errorf(device
->instance
, device
, VK_ERROR_DEVICE_LOST
,
515 "drm_syncobj_wait failed: %m");
523 anv_wait_for_bo_fences(struct anv_device
*device
,
525 const VkFence
*pFences
,
531 /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is supposed
532 * to block indefinitely timeouts <= 0. Unfortunately, this was broken
533 * for a couple of kernel releases. Since there's no way to know
534 * whether or not the kernel we're using is one of the broken ones, the
535 * best we can do is to clamp the timeout to INT64_MAX. This limits the
536 * maximum timeout from 584 years to 292 years - likely not a big deal.
538 int64_t timeout
= MIN2(_timeout
, INT64_MAX
);
540 VkResult result
= VK_SUCCESS
;
541 uint32_t pending_fences
= fenceCount
;
542 while (pending_fences
) {
544 bool signaled_fences
= false;
545 for (uint32_t i
= 0; i
< fenceCount
; i
++) {
546 ANV_FROM_HANDLE(anv_fence
, fence
, pFences
[i
]);
548 /* This function assumes that all fences are BO fences and that they
549 * have no temporary state. Since BO fences will never be exported,
550 * this should be a safe assumption.
552 assert(fence
->permanent
.type
== ANV_FENCE_TYPE_BO
);
553 assert(fence
->temporary
.type
== ANV_FENCE_TYPE_NONE
);
554 struct anv_fence_impl
*impl
= &fence
->permanent
;
556 switch (impl
->bo
.state
) {
557 case ANV_BO_FENCE_STATE_RESET
:
558 /* This fence hasn't been submitted yet, we'll catch it the next
559 * time around. Yes, this may mean we dead-loop but, short of
560 * lots of locking and a condition variable, there's not much that
561 * we can do about that.
566 case ANV_BO_FENCE_STATE_SIGNALED
:
567 /* This fence is not pending. If waitAll isn't set, we can return
568 * early. Otherwise, we have to keep going.
576 case ANV_BO_FENCE_STATE_SUBMITTED
:
577 /* These are the fences we really care about. Go ahead and wait
578 * on it until we hit a timeout.
580 result
= anv_device_wait(device
, &impl
->bo
.bo
, timeout
);
583 impl
->bo
.state
= ANV_BO_FENCE_STATE_SIGNALED
;
584 signaled_fences
= true;
598 if (pending_fences
&& !signaled_fences
) {
599 /* If we've hit this then someone decided to vkWaitForFences before
600 * they've actually submitted any of them to a queue. This is a
601 * fairly pessimal case, so it's ok to lock here and use a standard
602 * pthreads condition variable.
604 pthread_mutex_lock(&device
->mutex
);
606 /* It's possible that some of the fences have changed state since the
607 * last time we checked. Now that we have the lock, check for
608 * pending fences again and don't wait if it's changed.
610 uint32_t now_pending_fences
= 0;
611 for (uint32_t i
= 0; i
< fenceCount
; i
++) {
612 ANV_FROM_HANDLE(anv_fence
, fence
, pFences
[i
]);
613 if (fence
->permanent
.bo
.state
== ANV_BO_FENCE_STATE_RESET
)
614 now_pending_fences
++;
616 assert(now_pending_fences
<= pending_fences
);
618 if (now_pending_fences
== pending_fences
) {
619 struct timespec before
;
620 clock_gettime(CLOCK_MONOTONIC
, &before
);
622 uint32_t abs_nsec
= before
.tv_nsec
+ timeout
% NSEC_PER_SEC
;
623 uint64_t abs_sec
= before
.tv_sec
+ (abs_nsec
/ NSEC_PER_SEC
) +
624 (timeout
/ NSEC_PER_SEC
);
625 abs_nsec
%= NSEC_PER_SEC
;
627 /* Avoid roll-over in tv_sec on 32-bit systems if the user
628 * provided timeout is UINT64_MAX
630 struct timespec abstime
;
631 abstime
.tv_nsec
= abs_nsec
;
632 abstime
.tv_sec
= MIN2(abs_sec
, INT_TYPE_MAX(abstime
.tv_sec
));
634 ret
= pthread_cond_timedwait(&device
->queue_submit
,
635 &device
->mutex
, &abstime
);
636 assert(ret
!= EINVAL
);
638 struct timespec after
;
639 clock_gettime(CLOCK_MONOTONIC
, &after
);
640 uint64_t time_elapsed
=
641 ((uint64_t)after
.tv_sec
* NSEC_PER_SEC
+ after
.tv_nsec
) -
642 ((uint64_t)before
.tv_sec
* NSEC_PER_SEC
+ before
.tv_nsec
);
644 if (time_elapsed
>= timeout
) {
645 pthread_mutex_unlock(&device
->mutex
);
650 timeout
-= time_elapsed
;
653 pthread_mutex_unlock(&device
->mutex
);
658 if (unlikely(device
->lost
))
659 return VK_ERROR_DEVICE_LOST
;
664 VkResult
anv_WaitForFences(
667 const VkFence
* pFences
,
671 ANV_FROM_HANDLE(anv_device
, device
, _device
);
673 if (unlikely(device
->lost
))
674 return VK_ERROR_DEVICE_LOST
;
676 if (device
->instance
->physicalDevice
.has_syncobj_wait
) {
677 return anv_wait_for_syncobj_fences(device
, fenceCount
, pFences
,
680 return anv_wait_for_bo_fences(device
, fenceCount
, pFences
,
685 void anv_GetPhysicalDeviceExternalFenceProperties(
686 VkPhysicalDevice physicalDevice
,
687 const VkPhysicalDeviceExternalFenceInfoKHR
* pExternalFenceInfo
,
688 VkExternalFencePropertiesKHR
* pExternalFenceProperties
)
690 ANV_FROM_HANDLE(anv_physical_device
, device
, physicalDevice
);
692 switch (pExternalFenceInfo
->handleType
) {
693 case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT
:
694 case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT
:
695 if (device
->has_syncobj_wait
) {
696 pExternalFenceProperties
->exportFromImportedHandleTypes
=
697 VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT
|
698 VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT
;
699 pExternalFenceProperties
->compatibleHandleTypes
=
700 VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT
|
701 VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT
;
702 pExternalFenceProperties
->externalFenceFeatures
=
703 VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT
|
704 VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT
;
713 pExternalFenceProperties
->exportFromImportedHandleTypes
= 0;
714 pExternalFenceProperties
->compatibleHandleTypes
= 0;
715 pExternalFenceProperties
->externalFenceFeatures
= 0;
718 VkResult
anv_ImportFenceFdKHR(
720 const VkImportFenceFdInfoKHR
* pImportFenceFdInfo
)
722 ANV_FROM_HANDLE(anv_device
, device
, _device
);
723 ANV_FROM_HANDLE(anv_fence
, fence
, pImportFenceFdInfo
->fence
);
724 int fd
= pImportFenceFdInfo
->fd
;
726 assert(pImportFenceFdInfo
->sType
==
727 VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR
);
729 struct anv_fence_impl new_impl
= {
730 .type
= ANV_FENCE_TYPE_NONE
,
733 switch (pImportFenceFdInfo
->handleType
) {
734 case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT
:
735 new_impl
.type
= ANV_FENCE_TYPE_SYNCOBJ
;
737 new_impl
.syncobj
= anv_gem_syncobj_fd_to_handle(device
, fd
);
738 if (!new_impl
.syncobj
)
739 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE
);
743 case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT
:
744 /* Sync files are a bit tricky. Because we want to continue using the
745 * syncobj implementation of WaitForFences, we don't use the sync file
746 * directly but instead import it into a syncobj.
748 new_impl
.type
= ANV_FENCE_TYPE_SYNCOBJ
;
750 new_impl
.syncobj
= anv_gem_syncobj_create(device
, 0);
751 if (!new_impl
.syncobj
)
752 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
754 if (anv_gem_syncobj_import_sync_file(device
, new_impl
.syncobj
, fd
)) {
755 anv_gem_syncobj_destroy(device
, new_impl
.syncobj
);
756 return vk_errorf(device
->instance
, NULL
,
757 VK_ERROR_INVALID_EXTERNAL_HANDLE
,
758 "syncobj sync file import failed: %m");
763 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE
);
766 /* From the Vulkan 1.0.53 spec:
768 * "Importing a fence payload from a file descriptor transfers
769 * ownership of the file descriptor from the application to the
770 * Vulkan implementation. The application must not perform any
771 * operations on the file descriptor after a successful import."
773 * If the import fails, we leave the file descriptor open.
777 if (pImportFenceFdInfo
->flags
& VK_FENCE_IMPORT_TEMPORARY_BIT
) {
778 anv_fence_impl_cleanup(device
, &fence
->temporary
);
779 fence
->temporary
= new_impl
;
781 anv_fence_impl_cleanup(device
, &fence
->permanent
);
782 fence
->permanent
= new_impl
;
788 VkResult
anv_GetFenceFdKHR(
790 const VkFenceGetFdInfoKHR
* pGetFdInfo
,
793 ANV_FROM_HANDLE(anv_device
, device
, _device
);
794 ANV_FROM_HANDLE(anv_fence
, fence
, pGetFdInfo
->fence
);
796 assert(pGetFdInfo
->sType
== VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR
);
798 struct anv_fence_impl
*impl
=
799 fence
->temporary
.type
!= ANV_FENCE_TYPE_NONE
?
800 &fence
->temporary
: &fence
->permanent
;
802 assert(impl
->type
== ANV_FENCE_TYPE_SYNCOBJ
);
803 switch (pGetFdInfo
->handleType
) {
804 case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT
: {
805 int fd
= anv_gem_syncobj_handle_to_fd(device
, impl
->syncobj
);
807 return vk_error(VK_ERROR_TOO_MANY_OBJECTS
);
813 case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT
: {
814 int fd
= anv_gem_syncobj_export_sync_file(device
, impl
->syncobj
);
816 return vk_error(VK_ERROR_TOO_MANY_OBJECTS
);
823 unreachable("Invalid fence export handle type");
826 /* From the Vulkan 1.0.53 spec:
828 * "Export operations have the same transference as the specified handle
829 * type’s import operations. [...] If the fence was using a
830 * temporarily imported payload, the fence’s prior permanent payload
833 if (impl
== &fence
->temporary
)
834 anv_fence_impl_cleanup(device
, impl
);
839 // Queue semaphore functions
841 VkResult
anv_CreateSemaphore(
843 const VkSemaphoreCreateInfo
* pCreateInfo
,
844 const VkAllocationCallbacks
* pAllocator
,
845 VkSemaphore
* pSemaphore
)
847 ANV_FROM_HANDLE(anv_device
, device
, _device
);
848 struct anv_semaphore
*semaphore
;
850 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO
);
852 semaphore
= vk_alloc2(&device
->alloc
, pAllocator
, sizeof(*semaphore
), 8,
853 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
854 if (semaphore
== NULL
)
855 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
857 const VkExportSemaphoreCreateInfoKHR
*export
=
858 vk_find_struct_const(pCreateInfo
->pNext
, EXPORT_SEMAPHORE_CREATE_INFO
);
859 VkExternalSemaphoreHandleTypeFlagsKHR handleTypes
=
860 export
? export
->handleTypes
: 0;
862 if (handleTypes
== 0) {
863 /* The DRM execbuffer ioctl always execute in-oder so long as you stay
864 * on the same ring. Since we don't expose the blit engine as a DMA
865 * queue, a dummy no-op semaphore is a perfectly valid implementation.
867 semaphore
->permanent
.type
= ANV_SEMAPHORE_TYPE_DUMMY
;
868 } else if (handleTypes
& VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT
) {
869 assert(handleTypes
== VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT
);
870 if (device
->instance
->physicalDevice
.has_syncobj
) {
871 semaphore
->permanent
.type
= ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ
;
872 semaphore
->permanent
.syncobj
= anv_gem_syncobj_create(device
, 0);
873 if (!semaphore
->permanent
.syncobj
) {
874 vk_free2(&device
->alloc
, pAllocator
, semaphore
);
875 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
878 semaphore
->permanent
.type
= ANV_SEMAPHORE_TYPE_BO
;
879 VkResult result
= anv_bo_cache_alloc(device
, &device
->bo_cache
,
881 &semaphore
->permanent
.bo
);
882 if (result
!= VK_SUCCESS
) {
883 vk_free2(&device
->alloc
, pAllocator
, semaphore
);
887 /* If we're going to use this as a fence, we need to *not* have the
888 * EXEC_OBJECT_ASYNC bit set.
890 assert(!(semaphore
->permanent
.bo
->flags
& EXEC_OBJECT_ASYNC
));
892 } else if (handleTypes
& VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT
) {
893 assert(handleTypes
== VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT
);
895 semaphore
->permanent
.type
= ANV_SEMAPHORE_TYPE_SYNC_FILE
;
896 semaphore
->permanent
.fd
= -1;
898 assert(!"Unknown handle type");
899 vk_free2(&device
->alloc
, pAllocator
, semaphore
);
900 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE
);
903 semaphore
->temporary
.type
= ANV_SEMAPHORE_TYPE_NONE
;
905 *pSemaphore
= anv_semaphore_to_handle(semaphore
);
911 anv_semaphore_impl_cleanup(struct anv_device
*device
,
912 struct anv_semaphore_impl
*impl
)
914 switch (impl
->type
) {
915 case ANV_SEMAPHORE_TYPE_NONE
:
916 case ANV_SEMAPHORE_TYPE_DUMMY
:
917 /* Dummy. Nothing to do */
920 case ANV_SEMAPHORE_TYPE_BO
:
921 anv_bo_cache_release(device
, &device
->bo_cache
, impl
->bo
);
924 case ANV_SEMAPHORE_TYPE_SYNC_FILE
:
928 case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ
:
929 anv_gem_syncobj_destroy(device
, impl
->syncobj
);
933 unreachable("Invalid semaphore type");
937 anv_semaphore_reset_temporary(struct anv_device
*device
,
938 struct anv_semaphore
*semaphore
)
940 if (semaphore
->temporary
.type
== ANV_SEMAPHORE_TYPE_NONE
)
943 anv_semaphore_impl_cleanup(device
, &semaphore
->temporary
);
944 semaphore
->temporary
.type
= ANV_SEMAPHORE_TYPE_NONE
;
947 void anv_DestroySemaphore(
949 VkSemaphore _semaphore
,
950 const VkAllocationCallbacks
* pAllocator
)
952 ANV_FROM_HANDLE(anv_device
, device
, _device
);
953 ANV_FROM_HANDLE(anv_semaphore
, semaphore
, _semaphore
);
955 if (semaphore
== NULL
)
958 anv_semaphore_impl_cleanup(device
, &semaphore
->temporary
);
959 anv_semaphore_impl_cleanup(device
, &semaphore
->permanent
);
961 vk_free2(&device
->alloc
, pAllocator
, semaphore
);
964 void anv_GetPhysicalDeviceExternalSemaphoreProperties(
965 VkPhysicalDevice physicalDevice
,
966 const VkPhysicalDeviceExternalSemaphoreInfoKHR
* pExternalSemaphoreInfo
,
967 VkExternalSemaphorePropertiesKHR
* pExternalSemaphoreProperties
)
969 ANV_FROM_HANDLE(anv_physical_device
, device
, physicalDevice
);
971 switch (pExternalSemaphoreInfo
->handleType
) {
972 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT
:
973 pExternalSemaphoreProperties
->exportFromImportedHandleTypes
=
974 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT
;
975 pExternalSemaphoreProperties
->compatibleHandleTypes
=
976 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT
;
977 pExternalSemaphoreProperties
->externalSemaphoreFeatures
=
978 VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT
|
979 VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT
;
982 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT
:
983 if (device
->has_exec_fence
) {
984 pExternalSemaphoreProperties
->exportFromImportedHandleTypes
= 0;
985 pExternalSemaphoreProperties
->compatibleHandleTypes
=
986 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT
;
987 pExternalSemaphoreProperties
->externalSemaphoreFeatures
=
988 VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT
|
989 VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT
;
998 pExternalSemaphoreProperties
->exportFromImportedHandleTypes
= 0;
999 pExternalSemaphoreProperties
->compatibleHandleTypes
= 0;
1000 pExternalSemaphoreProperties
->externalSemaphoreFeatures
= 0;
1003 VkResult
anv_ImportSemaphoreFdKHR(
1005 const VkImportSemaphoreFdInfoKHR
* pImportSemaphoreFdInfo
)
1007 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1008 ANV_FROM_HANDLE(anv_semaphore
, semaphore
, pImportSemaphoreFdInfo
->semaphore
);
1009 int fd
= pImportSemaphoreFdInfo
->fd
;
1011 struct anv_semaphore_impl new_impl
= {
1012 .type
= ANV_SEMAPHORE_TYPE_NONE
,
1015 switch (pImportSemaphoreFdInfo
->handleType
) {
1016 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT
:
1017 if (device
->instance
->physicalDevice
.has_syncobj
) {
1018 new_impl
.type
= ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ
;
1020 new_impl
.syncobj
= anv_gem_syncobj_fd_to_handle(device
, fd
);
1021 if (!new_impl
.syncobj
)
1022 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE
);
1024 new_impl
.type
= ANV_SEMAPHORE_TYPE_BO
;
1026 VkResult result
= anv_bo_cache_import(device
, &device
->bo_cache
,
1027 fd
, 0, &new_impl
.bo
);
1028 if (result
!= VK_SUCCESS
)
1031 if (new_impl
.bo
->size
< 4096) {
1032 anv_bo_cache_release(device
, &device
->bo_cache
, new_impl
.bo
);
1033 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR
);
1036 /* If we're going to use this as a fence, we need to *not* have the
1037 * EXEC_OBJECT_ASYNC bit set.
1039 assert(!(new_impl
.bo
->flags
& EXEC_OBJECT_ASYNC
));
1042 /* From the Vulkan spec:
1044 * "Importing semaphore state from a file descriptor transfers
1045 * ownership of the file descriptor from the application to the
1046 * Vulkan implementation. The application must not perform any
1047 * operations on the file descriptor after a successful import."
1049 * If the import fails, we leave the file descriptor open.
1054 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT
:
1055 new_impl
= (struct anv_semaphore_impl
) {
1056 .type
= ANV_SEMAPHORE_TYPE_SYNC_FILE
,
1062 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE
);
1065 if (pImportSemaphoreFdInfo
->flags
& VK_SEMAPHORE_IMPORT_TEMPORARY_BIT
) {
1066 anv_semaphore_impl_cleanup(device
, &semaphore
->temporary
);
1067 semaphore
->temporary
= new_impl
;
1069 anv_semaphore_impl_cleanup(device
, &semaphore
->permanent
);
1070 semaphore
->permanent
= new_impl
;
1076 VkResult
anv_GetSemaphoreFdKHR(
1078 const VkSemaphoreGetFdInfoKHR
* pGetFdInfo
,
1081 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1082 ANV_FROM_HANDLE(anv_semaphore
, semaphore
, pGetFdInfo
->semaphore
);
1086 assert(pGetFdInfo
->sType
== VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR
);
1088 struct anv_semaphore_impl
*impl
=
1089 semaphore
->temporary
.type
!= ANV_SEMAPHORE_TYPE_NONE
?
1090 &semaphore
->temporary
: &semaphore
->permanent
;
1092 switch (impl
->type
) {
1093 case ANV_SEMAPHORE_TYPE_BO
:
1094 result
= anv_bo_cache_export(device
, &device
->bo_cache
, impl
->bo
, pFd
);
1095 if (result
!= VK_SUCCESS
)
1099 case ANV_SEMAPHORE_TYPE_SYNC_FILE
:
1100 /* There are two reasons why this could happen:
1102 * 1) The user is trying to export without submitting something that
1103 * signals the semaphore. If this is the case, it's their bug so
1104 * what we return here doesn't matter.
1106 * 2) The kernel didn't give us a file descriptor. The most likely
1107 * reason for this is running out of file descriptors.
1110 return vk_error(VK_ERROR_TOO_MANY_OBJECTS
);
1114 /* From the Vulkan 1.0.53 spec:
1116 * "...exporting a semaphore payload to a handle with copy
1117 * transference has the same side effects on the source
1118 * semaphore’s payload as executing a semaphore wait operation."
1120 * In other words, it may still be a SYNC_FD semaphore, but it's now
1121 * considered to have been waited on and no longer has a sync file
1127 case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ
:
1128 fd
= anv_gem_syncobj_handle_to_fd(device
, impl
->syncobj
);
1130 return vk_error(VK_ERROR_TOO_MANY_OBJECTS
);
1135 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE
);
1138 /* From the Vulkan 1.0.53 spec:
1140 * "Export operations have the same transference as the specified handle
1141 * type’s import operations. [...] If the semaphore was using a
1142 * temporarily imported payload, the semaphore’s prior permanent payload
1145 if (impl
== &semaphore
->temporary
)
1146 anv_semaphore_impl_cleanup(device
, impl
);