/* mesa.git: src/intel/vulkan/anv_queue.c */
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * This file implements VkQueue, VkFence, and VkSemaphore
 */

#include <fcntl.h>
#include <unistd.h>
#include <sys/eventfd.h>

#include "anv_private.h"
#include "vk_util.h"

#include "genxml/gen7_pack.h"

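/* Thin wrapper around the execbuffer2 ioctl. On failure the device is
 * marked lost, since we cannot tell how far the kernel got before rejecting
 * the batch. On success, the offsets the kernel chose for each BO are
 * copied back into our anv_bo structs, with an assert that pinned BOs were
 * not moved.
 */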
VkResult
anv_device_execbuf(struct anv_device *device,
                   struct drm_i915_gem_execbuffer2 *execbuf,
                   struct anv_bo **execbuf_bos)
{
   int ret = device->no_hw ? 0 : anv_gem_execbuffer(device, execbuf);
   if (ret != 0) {
      /* We don't know the real error. */
      return anv_device_set_lost(device, "execbuf2 failed: %m");
   }

   struct drm_i915_gem_exec_object2 *objects =
      (void *)(uintptr_t)execbuf->buffers_ptr;
   for (uint32_t k = 0; k < execbuf->buffer_count; k++) {
      if (execbuf_bos[k]->flags & EXEC_OBJECT_PINNED)
         assert(execbuf_bos[k]->offset == objects[k].offset);
      execbuf_bos[k]->offset = objects[k].offset;
   }

   return VK_SUCCESS;
}

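/* Synchronously submits a small, self-contained batch and waits for it to
 * complete. The batch contents are copied into a fresh BO from the batch BO
 * pool, flushed on non-LLC platforms, and submitted on the render ring with
 * no relocations.
 */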
VkResult
anv_device_submit_simple_batch(struct anv_device *device,
                               struct anv_batch *batch)
{
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   struct anv_bo bo, *exec_bos[1];
   VkResult result = VK_SUCCESS;
   uint32_t size;

   /* Kernel driver requires 8 byte aligned batch length */
   size = align_u32(batch->next - batch->start, 8);
   result = anv_bo_pool_alloc(&device->batch_bo_pool, &bo, size);
   if (result != VK_SUCCESS)
      return result;

   memcpy(bo.map, batch->start, size);
   if (!device->info.has_llc)
      gen_flush_range(bo.map, size);

   exec_bos[0] = &bo;
   exec2_objects[0].handle = bo.gem_handle;
   exec2_objects[0].relocation_count = 0;
   exec2_objects[0].relocs_ptr = 0;
   exec2_objects[0].alignment = 0;
   exec2_objects[0].offset = bo.offset;
   exec2_objects[0].flags = bo.flags;
   exec2_objects[0].rsvd1 = 0;
   exec2_objects[0].rsvd2 = 0;

   execbuf.buffers_ptr = (uintptr_t) exec2_objects;
   execbuf.buffer_count = 1;
   execbuf.batch_start_offset = 0;
   execbuf.batch_len = size;
   execbuf.cliprects_ptr = 0;
   execbuf.num_cliprects = 0;
   execbuf.DR1 = 0;
   execbuf.DR4 = 0;

   execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   execbuf.rsvd1 = device->context_id;
   execbuf.rsvd2 = 0;

   result = anv_device_execbuf(device, &execbuf, exec_bos);
   if (result != VK_SUCCESS)
      goto fail;

   result = anv_device_wait(device, &bo, INT64_MAX);

 fail:
   anv_bo_pool_free(&device->batch_bo_pool, &bo);

   return result;
}

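/* vkQueueSubmit: each VkSubmitInfo becomes one execbuf per command buffer.
 * Wait semaphores apply only to the first batch of a submit, while signal
 * semaphores and the fence apply only to the last, matching Vulkan's
 * submission-ordering guarantees.
 */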
VkResult anv_QueueSubmit(
    VkQueue                                     _queue,
    uint32_t                                    submitCount,
    const VkSubmitInfo*                         pSubmits,
    VkFence                                     fence)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);
   struct anv_device *device = queue->device;

   /* Query for device status prior to submitting. Technically, we don't need
    * to do this. However, if we have a client that's submitting piles of
    * garbage, we would rather break as early as possible to keep the GPU
    * hanging contained. If we don't check here, we'll either be waiting for
    * the kernel to kick us or we'll have to wait until the client waits on a
    * fence before we actually know whether or not we've hung.
    */
   VkResult result = anv_device_query_status(device);
   if (result != VK_SUCCESS)
      return result;

   /* We lock around QueueSubmit for three main reasons:
    *
    *  1) When a block pool is resized, we create a new gem handle with a
    *     different size and, in the case of surface states, possibly a
    *     different center offset but we re-use the same anv_bo struct when
    *     we do so. If this happens in the middle of setting up an execbuf,
    *     we could end up with our list of BOs out of sync with our list of
    *     gem handles.
    *
    *  2) The algorithm we use for building the list of unique buffers isn't
    *     thread-safe. While the client is supposed to synchronize around
    *     QueueSubmit, this would be extremely difficult to debug if it ever
    *     came up in the wild due to a broken app. It's better to play it
    *     safe and just lock around QueueSubmit.
    *
    *  3) The anv_cmd_buffer_execbuf function may perform relocations in
    *     userspace. Due to the fact that the surface state buffer is shared
    *     between batches, we can't afford to have that happen from multiple
    *     threads at the same time. Even though the user is supposed to
    *     ensure this doesn't happen, we play it safe as in (2) above.
    *
    * Since the only other things that ever take the device lock, such as
    * block pool resizes, happen only rarely, the lock will almost never be
    * contended, so taking it isn't really an expensive operation in this
    * case.
    */
   pthread_mutex_lock(&device->mutex);

   if (fence && submitCount == 0) {
      /* If we don't have any command buffers, we need to submit a dummy
       * batch to give GEM something to wait on. We could, potentially,
       * come up with something more efficient but this shouldn't be a
       * common case.
       */
      result = anv_cmd_buffer_execbuf(device, NULL, NULL, 0, NULL, 0, fence);
      goto out;
   }

   for (uint32_t i = 0; i < submitCount; i++) {
      /* Fence for this submit. NULL for all but the last one */
      VkFence submit_fence = (i == submitCount - 1) ? fence : VK_NULL_HANDLE;

      if (pSubmits[i].commandBufferCount == 0) {
         /* If we don't have any command buffers, we need to submit a dummy
          * batch to give GEM something to wait on. We could, potentially,
          * come up with something more efficient but this shouldn't be a
          * common case.
          */
         result = anv_cmd_buffer_execbuf(device, NULL,
                                         pSubmits[i].pWaitSemaphores,
                                         pSubmits[i].waitSemaphoreCount,
                                         pSubmits[i].pSignalSemaphores,
                                         pSubmits[i].signalSemaphoreCount,
                                         submit_fence);
         if (result != VK_SUCCESS)
            goto out;

         continue;
      }

      for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
         ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer,
                         pSubmits[i].pCommandBuffers[j]);
         assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
         assert(!anv_batch_has_error(&cmd_buffer->batch));

         /* Fence for this execbuf. NULL for all but the last one */
         VkFence execbuf_fence =
            (j == pSubmits[i].commandBufferCount - 1) ?
            submit_fence : VK_NULL_HANDLE;

         const VkSemaphore *in_semaphores = NULL, *out_semaphores = NULL;
         uint32_t num_in_semaphores = 0, num_out_semaphores = 0;
         if (j == 0) {
            /* Only the first batch gets the in semaphores */
            in_semaphores = pSubmits[i].pWaitSemaphores;
            num_in_semaphores = pSubmits[i].waitSemaphoreCount;
         }

         if (j == pSubmits[i].commandBufferCount - 1) {
            /* Only the last batch gets the out semaphores */
            out_semaphores = pSubmits[i].pSignalSemaphores;
            num_out_semaphores = pSubmits[i].signalSemaphoreCount;
         }

         result = anv_cmd_buffer_execbuf(device, cmd_buffer,
                                         in_semaphores, num_in_semaphores,
                                         out_semaphores, num_out_semaphores,
                                         execbuf_fence);
         if (result != VK_SUCCESS)
            goto out;
      }
   }

   pthread_cond_broadcast(&device->queue_submit);

out:
   if (result != VK_SUCCESS) {
      /* In the case that something has gone wrong we may end up with an
       * inconsistent state from which it may not be trivial to recover.
       * For example, we might have computed address relocations and
       * any future attempt to re-submit this job will need to know about
       * this and avoid computing relocation addresses again.
       *
       * To avoid this sort of issue, we assume that if something was
       * wrong during submission we must already be in a really bad situation
       * anyway (such as being out of memory) and return
       * VK_ERROR_DEVICE_LOST to ensure that clients do not attempt to
       * submit the same job again to this device.
       */
      result = anv_device_set_lost(device, "vkQueueSubmit() failed");
   }

   pthread_mutex_unlock(&device->mutex);

   return result;
}

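/* anv exposes a single queue per device, so waiting for that queue to go
 * idle is equivalent to waiting for the whole device to go idle.
 */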
VkResult anv_QueueWaitIdle(
    VkQueue                                     _queue)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);

   return anv_DeviceWaitIdle(anv_device_to_handle(queue->device));
}

VkResult anv_CreateFence(
    VkDevice                                    _device,
    const VkFenceCreateInfo*                    pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkFence*                                    pFence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_fence *fence;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);

   fence = vk_zalloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (fence == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (device->instance->physicalDevice.has_syncobj_wait) {
      fence->permanent.type = ANV_FENCE_TYPE_SYNCOBJ;

      uint32_t create_flags = 0;
      if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT)
         create_flags |= DRM_SYNCOBJ_CREATE_SIGNALED;

      fence->permanent.syncobj = anv_gem_syncobj_create(device, create_flags);
      if (!fence->permanent.syncobj)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   } else {
      fence->permanent.type = ANV_FENCE_TYPE_BO;

      VkResult result = anv_bo_pool_alloc(&device->batch_bo_pool,
                                          &fence->permanent.bo.bo, 4096);
      if (result != VK_SUCCESS)
         return result;

      if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
         fence->permanent.bo.state = ANV_BO_FENCE_STATE_SIGNALED;
      } else {
         fence->permanent.bo.state = ANV_BO_FENCE_STATE_RESET;
      }
   }

   *pFence = anv_fence_to_handle(fence);

   return VK_SUCCESS;
}

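/* Destroys whatever payload backs a fence implementation (BO, syncobj, or
 * WSI fence) and resets the impl to ANV_FENCE_TYPE_NONE so that cleaning it
 * up a second time is a no-op.
 */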
static void
anv_fence_impl_cleanup(struct anv_device *device,
                       struct anv_fence_impl *impl)
{
   switch (impl->type) {
   case ANV_FENCE_TYPE_NONE:
      /* Dummy. Nothing to do */
      break;

   case ANV_FENCE_TYPE_BO:
      anv_bo_pool_free(&device->batch_bo_pool, &impl->bo.bo);
      break;

   case ANV_FENCE_TYPE_SYNCOBJ:
      anv_gem_syncobj_destroy(device, impl->syncobj);
      break;

   case ANV_FENCE_TYPE_WSI:
      impl->fence_wsi->destroy(impl->fence_wsi);
      break;

   default:
      unreachable("Invalid fence type");
   }

   impl->type = ANV_FENCE_TYPE_NONE;
}

void anv_DestroyFence(
    VkDevice                                    _device,
    VkFence                                     _fence,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (!fence)
      return;

   anv_fence_impl_cleanup(device, &fence->temporary);
   anv_fence_impl_cleanup(device, &fence->permanent);

   vk_free2(&device->alloc, pAllocator, fence);
}

VkResult anv_ResetFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);

      /* From the Vulkan 1.0.53 spec:
       *
       *    "If any member of pFences currently has its payload imported with
       *    temporary permanence, that fence's prior permanent payload is
       *    first restored. The remaining operations described therefore
       *    operate on the restored payload."
       */
      if (fence->temporary.type != ANV_FENCE_TYPE_NONE)
         anv_fence_impl_cleanup(device, &fence->temporary);

      struct anv_fence_impl *impl = &fence->permanent;

      switch (impl->type) {
      case ANV_FENCE_TYPE_BO:
         impl->bo.state = ANV_BO_FENCE_STATE_RESET;
         break;

      case ANV_FENCE_TYPE_SYNCOBJ:
         anv_gem_syncobj_reset(device, impl->syncobj);
         break;

      default:
         unreachable("Invalid fence type");
      }
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceStatus(
    VkDevice                                    _device,
    VkFence                                     _fence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   struct anv_fence_impl *impl =
      fence->temporary.type != ANV_FENCE_TYPE_NONE ?
      &fence->temporary : &fence->permanent;

   switch (impl->type) {
   case ANV_FENCE_TYPE_BO:
      /* BO fences don't support import/export */
      assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
      switch (impl->bo.state) {
      case ANV_BO_FENCE_STATE_RESET:
         /* If it hasn't even been sent off to the GPU yet, it's not ready */
         return VK_NOT_READY;

      case ANV_BO_FENCE_STATE_SIGNALED:
         /* It's been signaled, return success */
         return VK_SUCCESS;

      case ANV_BO_FENCE_STATE_SUBMITTED: {
         VkResult result = anv_device_bo_busy(device, &impl->bo.bo);
         if (result == VK_SUCCESS) {
            impl->bo.state = ANV_BO_FENCE_STATE_SIGNALED;
            return VK_SUCCESS;
         } else {
            return result;
         }
      }
      default:
         unreachable("Invalid fence status");
      }

   case ANV_FENCE_TYPE_SYNCOBJ: {
      int ret = anv_gem_syncobj_wait(device, &impl->syncobj, 1, 0, true);
      if (ret == -1) {
         if (errno == ETIME) {
            return VK_NOT_READY;
         } else {
            /* We don't know the real error. */
            return anv_device_set_lost(device, "drm_syncobj_wait failed: %m");
         }
      } else {
         return VK_SUCCESS;
      }
   }

   default:
      unreachable("Invalid fence type");
   }
}

#define NSEC_PER_SEC 1000000000
#define INT_TYPE_MAX(type) ((1ull << (sizeof(type) * 8 - 1)) - 1)

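/* Helpers for converting between the relative timeouts Vulkan entrypoints
 * receive and the absolute CLOCK_MONOTONIC deadlines we track internally.
 * For example, a relative timeout of UINT64_MAX nanoseconds would overflow
 * when added to the current time, so anv_get_absolute_timeout() first clamps
 * it to INT64_MAX - now, yielding an absolute deadline of at most INT64_MAX.
 */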
static uint64_t
gettime_ns(void)
{
   struct timespec current;
   clock_gettime(CLOCK_MONOTONIC, &current);
   return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
}

static uint64_t anv_get_absolute_timeout(uint64_t timeout)
{
   if (timeout == 0)
      return 0;
   uint64_t current_time = gettime_ns();
   uint64_t max_timeout = (uint64_t) INT64_MAX - current_time;

   timeout = MIN2(max_timeout, timeout);

   return (current_time + timeout);
}

static int64_t anv_get_relative_timeout(uint64_t abs_timeout)
{
   uint64_t now = gettime_ns();

   /* We don't want negative timeouts.
    *
    * DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is
    * supposed to block indefinitely for timeouts < 0. Unfortunately,
    * this was broken for a couple of kernel releases. Since there's
    * no way to know whether or not the kernel we're using is one of
    * the broken ones, the best we can do is to clamp the timeout to
    * INT64_MAX. This limits the maximum timeout from 584 years to
    * 292 years - likely not a big deal.
    */
   if (abs_timeout < now)
      return 0;

   uint64_t rel_timeout = abs_timeout - now;
   if (rel_timeout > (uint64_t) INT64_MAX)
      rel_timeout = INT64_MAX;

   return rel_timeout;
}

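/* Waits on a set of fences whose active payloads are all DRM syncobjs by
 * gathering their handles into a single array and issuing one syncobj wait
 * ioctl for the whole set.
 */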
static VkResult
anv_wait_for_syncobj_fences(struct anv_device *device,
                            uint32_t fenceCount,
                            const VkFence *pFences,
                            bool waitAll,
                            uint64_t abs_timeout_ns)
{
   uint32_t *syncobjs = vk_zalloc(&device->alloc,
                                  sizeof(*syncobjs) * fenceCount, 8,
                                  VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!syncobjs)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      assert(fence->permanent.type == ANV_FENCE_TYPE_SYNCOBJ);

      struct anv_fence_impl *impl =
         fence->temporary.type != ANV_FENCE_TYPE_NONE ?
         &fence->temporary : &fence->permanent;

      assert(impl->type == ANV_FENCE_TYPE_SYNCOBJ);
      syncobjs[i] = impl->syncobj;
   }

   /* The gem_syncobj_wait ioctl may return early due to an inherent
    * limitation in the way it computes timeouts. Loop until we've actually
    * passed the timeout.
    */
   int ret;
   do {
      ret = anv_gem_syncobj_wait(device, syncobjs, fenceCount,
                                 abs_timeout_ns, waitAll);
   } while (ret == -1 && errno == ETIME && gettime_ns() < abs_timeout_ns);

   vk_free(&device->alloc, syncobjs);

   if (ret == -1) {
      if (errno == ETIME) {
         return VK_TIMEOUT;
      } else {
         /* We don't know the real error. */
         return anv_device_set_lost(device, "drm_syncobj_wait failed: %m");
      }
   } else {
      return VK_SUCCESS;
   }
}

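/* Waits on BO-backed fences by polling their state. Submitted fences are
 * waited on through anv_device_wait(); fences that have not been submitted
 * yet are handled by sleeping on the device's queue_submit condition
 * variable until another thread submits them or the timeout expires.
 */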
static VkResult
anv_wait_for_bo_fences(struct anv_device *device,
                       uint32_t fenceCount,
                       const VkFence *pFences,
                       bool waitAll,
                       uint64_t abs_timeout_ns)
{
   VkResult result = VK_SUCCESS;
   uint32_t pending_fences = fenceCount;
   while (pending_fences) {
      pending_fences = 0;
      bool signaled_fences = false;
      for (uint32_t i = 0; i < fenceCount; i++) {
         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);

         /* This function assumes that all fences are BO fences and that they
          * have no temporary state. Since BO fences will never be exported,
          * this should be a safe assumption.
          */
         assert(fence->permanent.type == ANV_FENCE_TYPE_BO);
         assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
         struct anv_fence_impl *impl = &fence->permanent;

         switch (impl->bo.state) {
         case ANV_BO_FENCE_STATE_RESET:
            /* This fence hasn't been submitted yet, we'll catch it the next
             * time around. Yes, this may mean we dead-loop but, short of
             * lots of locking and a condition variable, there's not much that
             * we can do about that.
             */
            pending_fences++;
            continue;

         case ANV_BO_FENCE_STATE_SIGNALED:
            /* This fence is not pending. If waitAll isn't set, we can return
             * early. Otherwise, we have to keep going.
             */
            if (!waitAll) {
               result = VK_SUCCESS;
               goto done;
            }
            continue;

         case ANV_BO_FENCE_STATE_SUBMITTED:
            /* These are the fences we really care about. Go ahead and wait
             * on it until we hit a timeout.
             */
            result = anv_device_wait(device, &impl->bo.bo,
                                     anv_get_relative_timeout(abs_timeout_ns));
            switch (result) {
            case VK_SUCCESS:
               impl->bo.state = ANV_BO_FENCE_STATE_SIGNALED;
               signaled_fences = true;
               if (!waitAll)
                  goto done;
               break;

            case VK_TIMEOUT:
               goto done;

            default:
               return result;
            }
         }
      }

      if (pending_fences && !signaled_fences) {
         /* If we've hit this then someone decided to vkWaitForFences before
          * they've actually submitted any of them to a queue. This is a
          * fairly pessimal case, so it's ok to lock here and use a standard
          * pthreads condition variable.
          */
         pthread_mutex_lock(&device->mutex);

         /* It's possible that some of the fences have changed state since the
          * last time we checked. Now that we have the lock, check for
          * pending fences again and don't wait if it's changed.
          */
         uint32_t now_pending_fences = 0;
         for (uint32_t i = 0; i < fenceCount; i++) {
            ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
            if (fence->permanent.bo.state == ANV_BO_FENCE_STATE_RESET)
               now_pending_fences++;
         }
         assert(now_pending_fences <= pending_fences);

         if (now_pending_fences == pending_fences) {
            struct timespec abstime = {
               .tv_sec = abs_timeout_ns / NSEC_PER_SEC,
               .tv_nsec = abs_timeout_ns % NSEC_PER_SEC,
            };

            MAYBE_UNUSED int ret;
            ret = pthread_cond_timedwait(&device->queue_submit,
                                         &device->mutex, &abstime);
            assert(ret != EINVAL);
            if (gettime_ns() >= abs_timeout_ns) {
               pthread_mutex_unlock(&device->mutex);
               result = VK_TIMEOUT;
               goto done;
            }
         }

         pthread_mutex_unlock(&device->mutex);
      }
   }

 done:
   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   return result;
}

static VkResult
anv_wait_for_wsi_fence(struct anv_device *device,
                       const VkFence _fence,
                       uint64_t abs_timeout)
{
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   struct anv_fence_impl *impl = &fence->permanent;

   return impl->fence_wsi->wait(impl->fence_wsi, abs_timeout);
}

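/* Generic waiter for a mix of fence types. "Wait all" reduces to waiting on
 * each fence in turn; "wait any" over mixed types has no single kernel
 * primitive, so we fall back to polling each fence with a zero timeout until
 * one signals or the deadline passes.
 */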
static VkResult
anv_wait_for_fences(struct anv_device *device,
                    uint32_t fenceCount,
                    const VkFence *pFences,
                    bool waitAll,
                    uint64_t abs_timeout)
{
   VkResult result = VK_SUCCESS;

   if (fenceCount <= 1 || waitAll) {
      for (uint32_t i = 0; i < fenceCount; i++) {
         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
         switch (fence->permanent.type) {
         case ANV_FENCE_TYPE_BO:
            result = anv_wait_for_bo_fences(device, 1, &pFences[i],
                                            true, abs_timeout);
            break;
         case ANV_FENCE_TYPE_SYNCOBJ:
            result = anv_wait_for_syncobj_fences(device, 1, &pFences[i],
                                                 true, abs_timeout);
            break;
         case ANV_FENCE_TYPE_WSI:
            result = anv_wait_for_wsi_fence(device, pFences[i], abs_timeout);
            break;
         case ANV_FENCE_TYPE_NONE:
            result = VK_SUCCESS;
            break;
         }
         if (result != VK_SUCCESS)
            return result;
      }
   } else {
      do {
         for (uint32_t i = 0; i < fenceCount; i++) {
            if (anv_wait_for_fences(device, 1, &pFences[i], true, 0) == VK_SUCCESS)
               return VK_SUCCESS;
         }
      } while (gettime_ns() < abs_timeout);
      result = VK_TIMEOUT;
   }
   return result;
}

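/* Predicates used by anv_WaitForFences to pick the fastest wait path: one
 * checks that every fence's permanent payload is a DRM syncobj, the other
 * that every payload is a BO.
 */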
static bool anv_all_fences_syncobj(uint32_t fenceCount, const VkFence *pFences)
{
   for (uint32_t i = 0; i < fenceCount; ++i) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      if (fence->permanent.type != ANV_FENCE_TYPE_SYNCOBJ)
         return false;
   }
   return true;
}

static bool anv_all_fences_bo(uint32_t fenceCount, const VkFence *pFences)
{
   for (uint32_t i = 0; i < fenceCount; ++i) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      if (fence->permanent.type != ANV_FENCE_TYPE_BO)
         return false;
   }
   return true;
}

VkResult anv_WaitForFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences,
    VkBool32                                    waitAll,
    uint64_t                                    timeout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   uint64_t abs_timeout = anv_get_absolute_timeout(timeout);
   if (anv_all_fences_syncobj(fenceCount, pFences)) {
      return anv_wait_for_syncobj_fences(device, fenceCount, pFences,
                                         waitAll, abs_timeout);
   } else if (anv_all_fences_bo(fenceCount, pFences)) {
      return anv_wait_for_bo_fences(device, fenceCount, pFences,
                                    waitAll, abs_timeout);
   } else {
      return anv_wait_for_fences(device, fenceCount, pFences,
                                 waitAll, abs_timeout);
   }
}

void anv_GetPhysicalDeviceExternalFenceProperties(
    VkPhysicalDevice                            physicalDevice,
    const VkPhysicalDeviceExternalFenceInfoKHR* pExternalFenceInfo,
    VkExternalFencePropertiesKHR*               pExternalFenceProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   switch (pExternalFenceInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
      if (device->has_syncobj_wait) {
         pExternalFenceProperties->exportFromImportedHandleTypes =
            VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT |
            VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalFenceProperties->compatibleHandleTypes =
            VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT |
            VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalFenceProperties->externalFenceFeatures =
            VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT;
         return;
      }
      break;

   default:
      break;
   }

   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

VkResult anv_ImportFenceFdKHR(
    VkDevice                                    _device,
    const VkImportFenceFdInfoKHR*               pImportFenceFdInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, pImportFenceFdInfo->fence);
   int fd = pImportFenceFdInfo->fd;

   assert(pImportFenceFdInfo->sType ==
          VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR);

   struct anv_fence_impl new_impl = {
      .type = ANV_FENCE_TYPE_NONE,
   };

   switch (pImportFenceFdInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
      new_impl.type = ANV_FENCE_TYPE_SYNCOBJ;

      new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
      if (!new_impl.syncobj)
         return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);

      break;

   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
      /* Sync files are a bit tricky. Because we want to continue using the
       * syncobj implementation of WaitForFences, we don't use the sync file
       * directly but instead import it into a syncobj.
       */
      new_impl.type = ANV_FENCE_TYPE_SYNCOBJ;

      new_impl.syncobj = anv_gem_syncobj_create(device, 0);
      if (!new_impl.syncobj)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      if (anv_gem_syncobj_import_sync_file(device, new_impl.syncobj, fd)) {
         anv_gem_syncobj_destroy(device, new_impl.syncobj);
         return vk_errorf(device->instance, NULL,
                          VK_ERROR_INVALID_EXTERNAL_HANDLE,
                          "syncobj sync file import failed: %m");
      }
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Importing a fence payload from a file descriptor transfers
    *    ownership of the file descriptor from the application to the
    *    Vulkan implementation. The application must not perform any
    *    operations on the file descriptor after a successful import."
    *
    * If the import fails, we leave the file descriptor open.
    */
   close(fd);

   if (pImportFenceFdInfo->flags & VK_FENCE_IMPORT_TEMPORARY_BIT) {
      anv_fence_impl_cleanup(device, &fence->temporary);
      fence->temporary = new_impl;
   } else {
      anv_fence_impl_cleanup(device, &fence->permanent);
      fence->permanent = new_impl;
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceFdKHR(
    VkDevice                                    _device,
    const VkFenceGetFdInfoKHR*                  pGetFdInfo,
    int*                                        pFd)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, pGetFdInfo->fence);

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR);

   struct anv_fence_impl *impl =
      fence->temporary.type != ANV_FENCE_TYPE_NONE ?
      &fence->temporary : &fence->permanent;

   assert(impl->type == ANV_FENCE_TYPE_SYNCOBJ);
   switch (pGetFdInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT: {
      int fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = fd;
      break;
   }

   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT: {
      int fd = anv_gem_syncobj_export_sync_file(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = fd;
      break;
   }

   default:
      unreachable("Invalid fence export handle type");
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Export operations have the same transference as the specified handle
    *    type's import operations. [...] If the fence was using a
    *    temporarily imported payload, the fence's prior permanent payload
    *    will be restored."
    */
   if (impl == &fence->temporary)
      anv_fence_impl_cleanup(device, impl);

   return VK_SUCCESS;
}

// Queue semaphore functions

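/* Like fences, semaphores carry a permanent payload plus an optional
 * temporary payload installed by an import with
 * VK_SEMAPHORE_IMPORT_TEMPORARY_BIT; the temporary payload, when present,
 * takes precedence and is consumed by the next wait or export.
 */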
VkResult anv_CreateSemaphore(
    VkDevice                                    _device,
    const VkSemaphoreCreateInfo*                pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkSemaphore*                                pSemaphore)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_semaphore *semaphore;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO);

   semaphore = vk_alloc2(&device->alloc, pAllocator, sizeof(*semaphore), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (semaphore == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   const VkExportSemaphoreCreateInfoKHR *export =
      vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO);
   VkExternalSemaphoreHandleTypeFlagsKHR handleTypes =
      export ? export->handleTypes : 0;

   if (handleTypes == 0) {
      /* The DRM execbuffer ioctl always executes in-order so long as you
       * stay on the same ring. Since we don't expose the blit engine as a
       * DMA queue, a dummy no-op semaphore is a perfectly valid
       * implementation.
       */
      semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DUMMY;
   } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) {
      assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT);
      if (device->instance->physicalDevice.has_syncobj) {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
         semaphore->permanent.syncobj = anv_gem_syncobj_create(device, 0);
         if (!semaphore->permanent.syncobj) {
            vk_free2(&device->alloc, pAllocator, semaphore);
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         }
      } else {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_BO;
         VkResult result = anv_bo_cache_alloc(device, &device->bo_cache,
                                              4096, ANV_BO_EXTERNAL,
                                              &semaphore->permanent.bo);
         if (result != VK_SUCCESS) {
            vk_free2(&device->alloc, pAllocator, semaphore);
            return result;
         }

         /* If we're going to use this as a fence, we need to *not* have the
          * EXEC_OBJECT_ASYNC bit set.
          */
         assert(!(semaphore->permanent.bo->flags & EXEC_OBJECT_ASYNC));
      }
   } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
      assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT);

      semaphore->permanent.type = ANV_SEMAPHORE_TYPE_SYNC_FILE;
      semaphore->permanent.fd = -1;
   } else {
      assert(!"Unknown handle type");
      vk_free2(&device->alloc, pAllocator, semaphore);
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   semaphore->temporary.type = ANV_SEMAPHORE_TYPE_NONE;

   *pSemaphore = anv_semaphore_to_handle(semaphore);

   return VK_SUCCESS;
}

static void
anv_semaphore_impl_cleanup(struct anv_device *device,
                           struct anv_semaphore_impl *impl)
{
   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_NONE:
   case ANV_SEMAPHORE_TYPE_DUMMY:
      /* Dummy. Nothing to do */
      break;

   case ANV_SEMAPHORE_TYPE_BO:
      anv_bo_cache_release(device, &device->bo_cache, impl->bo);
      break;

   case ANV_SEMAPHORE_TYPE_SYNC_FILE:
      close(impl->fd);
      break;

   case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
      anv_gem_syncobj_destroy(device, impl->syncobj);
      break;

   default:
      unreachable("Invalid semaphore type");
   }

   impl->type = ANV_SEMAPHORE_TYPE_NONE;
}

void
anv_semaphore_reset_temporary(struct anv_device *device,
                              struct anv_semaphore *semaphore)
{
   if (semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE)
      return;

   anv_semaphore_impl_cleanup(device, &semaphore->temporary);
}

void anv_DestroySemaphore(
    VkDevice                                    _device,
    VkSemaphore                                 _semaphore,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, _semaphore);

   if (semaphore == NULL)
      return;

   anv_semaphore_impl_cleanup(device, &semaphore->temporary);
   anv_semaphore_impl_cleanup(device, &semaphore->permanent);

   vk_free2(&device->alloc, pAllocator, semaphore);
}

void anv_GetPhysicalDeviceExternalSemaphoreProperties(
    VkPhysicalDevice                            physicalDevice,
    const VkPhysicalDeviceExternalSemaphoreInfoKHR* pExternalSemaphoreInfo,
    VkExternalSemaphorePropertiesKHR*           pExternalSemaphoreProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   switch (pExternalSemaphoreInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
      pExternalSemaphoreProperties->exportFromImportedHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
      pExternalSemaphoreProperties->compatibleHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
      pExternalSemaphoreProperties->externalSemaphoreFeatures =
         VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
         VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
      return;

   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
      if (device->has_exec_fence) {
         pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
         pExternalSemaphoreProperties->compatibleHandleTypes =
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalSemaphoreProperties->externalSemaphoreFeatures =
            VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
         return;
      }
      break;

   default:
      break;
   }

   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

VkResult anv_ImportSemaphoreFdKHR(
    VkDevice                                    _device,
    const VkImportSemaphoreFdInfoKHR*           pImportSemaphoreFdInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pImportSemaphoreFdInfo->semaphore);
   int fd = pImportSemaphoreFdInfo->fd;

   struct anv_semaphore_impl new_impl = {
      .type = ANV_SEMAPHORE_TYPE_NONE,
   };

   switch (pImportSemaphoreFdInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
      if (device->instance->physicalDevice.has_syncobj) {
         new_impl.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;

         new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
         if (!new_impl.syncobj)
            return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
      } else {
         new_impl.type = ANV_SEMAPHORE_TYPE_BO;

         VkResult result = anv_bo_cache_import(device, &device->bo_cache,
                                               fd, ANV_BO_EXTERNAL,
                                               &new_impl.bo);
         if (result != VK_SUCCESS)
            return result;

         if (new_impl.bo->size < 4096) {
            anv_bo_cache_release(device, &device->bo_cache, new_impl.bo);
            return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
         }

         /* If we're going to use this as a fence, we need to *not* have the
          * EXEC_OBJECT_ASYNC bit set.
          */
         assert(!(new_impl.bo->flags & EXEC_OBJECT_ASYNC));
      }

      /* From the Vulkan spec:
       *
       *    "Importing semaphore state from a file descriptor transfers
       *    ownership of the file descriptor from the application to the
       *    Vulkan implementation. The application must not perform any
       *    operations on the file descriptor after a successful import."
       *
       * If the import fails, we leave the file descriptor open.
       */
      close(fd);
      break;

   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
      new_impl = (struct anv_semaphore_impl) {
         .type = ANV_SEMAPHORE_TYPE_SYNC_FILE,
         .fd = fd,
      };
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
      anv_semaphore_impl_cleanup(device, &semaphore->temporary);
      semaphore->temporary = new_impl;
   } else {
      anv_semaphore_impl_cleanup(device, &semaphore->permanent);
      semaphore->permanent = new_impl;
   }

   return VK_SUCCESS;
}

VkResult anv_GetSemaphoreFdKHR(
    VkDevice                                    _device,
    const VkSemaphoreGetFdInfoKHR*              pGetFdInfo,
    int*                                        pFd)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pGetFdInfo->semaphore);
   VkResult result;
   int fd;

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR);

   struct anv_semaphore_impl *impl =
      semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
      &semaphore->temporary : &semaphore->permanent;

   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_BO:
      result = anv_bo_cache_export(device, &device->bo_cache, impl->bo, pFd);
      if (result != VK_SUCCESS)
         return result;
      break;

   case ANV_SEMAPHORE_TYPE_SYNC_FILE:
      /* There are two reasons why this could happen:
       *
       * 1) The user is trying to export without submitting something that
       *    signals the semaphore. If this is the case, it's their bug so
       *    what we return here doesn't matter.
       *
       * 2) The kernel didn't give us a file descriptor. The most likely
       *    reason for this is running out of file descriptors.
       */
      if (impl->fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = impl->fd;

      /* From the Vulkan 1.0.53 spec:
       *
       *    "...exporting a semaphore payload to a handle with copy
       *    transference has the same side effects on the source
       *    semaphore's payload as executing a semaphore wait operation."
       *
       * In other words, it may still be a SYNC_FD semaphore, but it's now
       * considered to have been waited on and no longer has a sync file
       * attached.
       */
      impl->fd = -1;
      return VK_SUCCESS;

   case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
      fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
      *pFd = fd;
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Export operations have the same transference as the specified handle
    *    type's import operations. [...] If the semaphore was using a
    *    temporarily imported payload, the semaphore's prior permanent payload
    *    will be restored."
    */
   if (impl == &semaphore->temporary)
      anv_semaphore_impl_cleanup(device, impl);

   return VK_SUCCESS;
}