/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * This file implements VkQueue, VkFence, and VkSemaphore
 */

#include <fcntl.h>
#include <unistd.h>
#include <sys/eventfd.h>

#include "anv_private.h"
#include "util/vk_util.h"

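/* The MI commands emitted below (MI_BATCH_BUFFER_END, MI_NOOP) are, as far
 * as I can tell, encoded identically on every gen, so the gen7 pack header
 * is used for all hardware this driver supports.
 */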
#include "genxml/gen7_pack.h"

VkResult
anv_device_execbuf(struct anv_device *device,
                   struct drm_i915_gem_execbuffer2 *execbuf,
                   struct anv_bo **execbuf_bos)
{
   int ret = anv_gem_execbuffer(device, execbuf);
   if (ret != 0) {
      /* We don't know the real error. */
      device->lost = true;
      return vk_errorf(VK_ERROR_DEVICE_LOST, "execbuf2 failed: %m");
   }

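   /* The kernel writes the final GPU offset of every object back into the
    * exec list; mirror those into our anv_bo structs so later submissions
    * can pass I915_EXEC_NO_RELOC with up-to-date offsets.
    */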
   struct drm_i915_gem_exec_object2 *objects =
      (void *)(uintptr_t)execbuf->buffers_ptr;
   for (uint32_t k = 0; k < execbuf->buffer_count; k++)
      execbuf_bos[k]->offset = objects[k].offset;

   return VK_SUCCESS;
}

VkResult
anv_device_submit_simple_batch(struct anv_device *device,
                               struct anv_batch *batch)
{
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   struct anv_bo bo, *exec_bos[1];
   VkResult result = VK_SUCCESS;
   uint32_t size;

   /* Kernel driver requires 8 byte aligned batch length */
   size = align_u32(batch->next - batch->start, 8);
   result = anv_bo_pool_alloc(&device->batch_bo_pool, &bo, size);
   if (result != VK_SUCCESS)
      return result;

   memcpy(bo.map, batch->start, size);
   if (!device->info.has_llc)
      anv_flush_range(bo.map, size);

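   /* A single exec object describing the batch BO. The batch is fully
    * self-contained, so there are no relocations to process.
    */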
   exec_bos[0] = &bo;
   exec2_objects[0].handle = bo.gem_handle;
   exec2_objects[0].relocation_count = 0;
   exec2_objects[0].relocs_ptr = 0;
   exec2_objects[0].alignment = 0;
   exec2_objects[0].offset = bo.offset;
   exec2_objects[0].flags = 0;
   exec2_objects[0].rsvd1 = 0;
   exec2_objects[0].rsvd2 = 0;

   execbuf.buffers_ptr = (uintptr_t) exec2_objects;
   execbuf.buffer_count = 1;
   execbuf.batch_start_offset = 0;
   execbuf.batch_len = size;
   execbuf.cliprects_ptr = 0;
   execbuf.num_cliprects = 0;
   execbuf.DR1 = 0;
   execbuf.DR4 = 0;

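   /* I915_EXEC_HANDLE_LUT makes relocation targets indices into the exec
    * list, I915_EXEC_NO_RELOC tells the kernel that the offsets we supply
    * are already valid, and rsvd1 carries the GEM context ID.
    */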
   execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   execbuf.rsvd1 = device->context_id;
   execbuf.rsvd2 = 0;

   result = anv_device_execbuf(device, &execbuf, exec_bos);
   if (result != VK_SUCCESS)
      goto fail;

   result = anv_device_wait(device, &bo, INT64_MAX);

fail:
   anv_bo_pool_free(&device->batch_bo_pool, &bo);

   return result;
}

VkResult anv_QueueSubmit(
    VkQueue _queue,
    uint32_t submitCount,
    const VkSubmitInfo* pSubmits,
    VkFence _fence)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   struct anv_device *device = queue->device;

   /* Query for device status prior to submitting. Technically, we don't need
    * to do this. However, if we have a client that's submitting piles of
    * garbage, we would rather break as early as possible to keep GPU hangs
    * contained. If we don't check here, we'll either be waiting for the
    * kernel to kick us or we'll have to wait until the client waits on a
    * fence before we actually know whether or not we've hung.
    */
   VkResult result = anv_device_query_status(device);
   if (result != VK_SUCCESS)
      return result;

   /* We lock around QueueSubmit for three main reasons:
    *
    * 1) When a block pool is resized, we create a new gem handle with a
    *    different size and, in the case of surface states, possibly a
    *    different center offset, but we re-use the same anv_bo struct when
    *    we do so. If this happens in the middle of setting up an execbuf,
    *    we could end up with our list of BOs out of sync with our list of
    *    gem handles.
    *
    * 2) The algorithm we use for building the list of unique buffers isn't
    *    thread-safe. While the client is supposed to synchronize around
    *    QueueSubmit, this would be extremely difficult to debug if it ever
    *    came up in the wild due to a broken app. It's better to play it
    *    safe and just lock around QueueSubmit.
    *
    * 3) The anv_cmd_buffer_execbuf function may perform relocations in
    *    userspace. Because the surface state buffer is shared between
    *    batches, we can't afford to have that happen from multiple threads
    *    at the same time. Even though the user is supposed to ensure this
    *    doesn't happen, we play it safe as in (2) above.
    *
    * Since the only other things that ever take the device lock, such as
    * block pool resizes, happen rarely, the lock is almost never contended
    * and taking it isn't really an expensive operation in this case.
    */
   pthread_mutex_lock(&device->mutex);

   for (uint32_t i = 0; i < submitCount; i++) {
      for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
         ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer,
                         pSubmits[i].pCommandBuffers[j]);
         assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
         assert(!anv_batch_has_error(&cmd_buffer->batch));

         const VkSemaphore *in_semaphores = NULL, *out_semaphores = NULL;
         uint32_t num_in_semaphores = 0, num_out_semaphores = 0;
         if (j == 0) {
            /* Only the first batch gets the in semaphores */
            in_semaphores = pSubmits[i].pWaitSemaphores;
            num_in_semaphores = pSubmits[i].waitSemaphoreCount;
         }

         if (j == pSubmits[i].commandBufferCount - 1) {
            /* Only the last batch gets the out semaphores */
            out_semaphores = pSubmits[i].pSignalSemaphores;
            num_out_semaphores = pSubmits[i].signalSemaphoreCount;
         }

         result = anv_cmd_buffer_execbuf(device, cmd_buffer,
                                         in_semaphores, num_in_semaphores,
                                         out_semaphores, num_out_semaphores);
         if (result != VK_SUCCESS)
            goto out;
      }
   }

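   /* The fence's own tiny batch is submitted last. Work submitted to the
    * same ring executes in order, so once the fence BO goes idle the whole
    * submission has completed.
    */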
   if (fence) {
      struct anv_bo *fence_bo = &fence->bo;
      result = anv_device_execbuf(device, &fence->execbuf, &fence_bo);
      if (result != VK_SUCCESS)
         goto out;

      /* Update the fence and wake up any waiters */
      assert(fence->state == ANV_FENCE_STATE_RESET);
      fence->state = ANV_FENCE_STATE_SUBMITTED;
      pthread_cond_broadcast(&device->queue_submit);
   }

out:
   if (result != VK_SUCCESS) {
      /* In the case that something has gone wrong we may end up with an
       * inconsistent state from which it may not be trivial to recover.
       * For example, we might have computed address relocations, and any
       * future attempt to re-submit this job would need to know about
       * that and avoid computing relocation addresses again.
       *
       * To avoid this sort of issue, we assume that if something went
       * wrong during submission we must already be in a really bad
       * situation anyway (such as being out of memory) and return
       * VK_ERROR_DEVICE_LOST to ensure that clients do not attempt to
       * submit the same job again to this device.
       */
      result = vk_errorf(VK_ERROR_DEVICE_LOST, "vkQueueSubmit() failed");
      device->lost = true;

      /* If we return VK_ERROR_DEVICE_LOST here, we need to ensure that
       * vkWaitForFences() and vkGetFenceStatus() return a valid result
       * (VK_SUCCESS or VK_ERROR_DEVICE_LOST) in a finite amount of time.
       * Setting the fence status to SIGNALED ensures this will happen in
       * any case.
       */
      if (fence)
         fence->state = ANV_FENCE_STATE_SIGNALED;
   }

   pthread_mutex_unlock(&device->mutex);

   return result;
}

VkResult anv_QueueWaitIdle(
    VkQueue _queue)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);

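   /* anv currently exposes only a single queue, so waiting for the queue to
    * go idle is equivalent to waiting for the whole device.
    */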
   return anv_DeviceWaitIdle(anv_device_to_handle(queue->device));
}

VkResult anv_CreateFence(
    VkDevice _device,
    const VkFenceCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkFence* pFence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_bo fence_bo;
   struct anv_fence *fence;
   struct anv_batch batch;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);

   result = anv_bo_pool_alloc(&device->batch_bo_pool, &fence_bo, 4096);
   if (result != VK_SUCCESS)
      return result;

   /* Fences are small. Just store the CPU data structure in the BO. */
   fence = fence_bo.map;
   fence->bo = fence_bo;

   /* Place the batch after the CPU data but on its own cache line. */
   const uint32_t batch_offset = align_u32(sizeof(*fence), CACHELINE_SIZE);
   batch.next = batch.start = fence->bo.map + batch_offset;
   batch.end = fence->bo.map + fence->bo.size;
   anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
   anv_batch_emit(&batch, GEN7_MI_NOOP, noop);

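   /* On non-LLC platforms the CPU cache is not coherent with the GPU's view
    * of memory, so flush the cache line holding the tiny batch before it
    * can ever be executed.
    */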
   if (!device->info.has_llc) {
      assert(((uintptr_t) batch.start & CACHELINE_MASK) == 0);
      assert(batch.next - batch.start <= CACHELINE_SIZE);
      __builtin_ia32_mfence();
      __builtin_ia32_clflush(batch.start);
   }

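   /* Pre-bake the fence's execbuf at creation time so that vkQueueSubmit
    * only has to hand it to the kernel.
    */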
   fence->exec2_objects[0].handle = fence->bo.gem_handle;
   fence->exec2_objects[0].relocation_count = 0;
   fence->exec2_objects[0].relocs_ptr = 0;
   fence->exec2_objects[0].alignment = 0;
   fence->exec2_objects[0].offset = fence->bo.offset;
   fence->exec2_objects[0].flags = 0;
   fence->exec2_objects[0].rsvd1 = 0;
   fence->exec2_objects[0].rsvd2 = 0;

   fence->execbuf.buffers_ptr = (uintptr_t) fence->exec2_objects;
   fence->execbuf.buffer_count = 1;
   fence->execbuf.batch_start_offset = batch.start - fence->bo.map;
   fence->execbuf.batch_len = batch.next - batch.start;
   fence->execbuf.cliprects_ptr = 0;
   fence->execbuf.num_cliprects = 0;
   fence->execbuf.DR1 = 0;
   fence->execbuf.DR4 = 0;

   fence->execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   fence->execbuf.rsvd1 = device->context_id;
   fence->execbuf.rsvd2 = 0;

   if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
      fence->state = ANV_FENCE_STATE_SIGNALED;
   } else {
      fence->state = ANV_FENCE_STATE_RESET;
   }

   *pFence = anv_fence_to_handle(fence);

   return VK_SUCCESS;
}

void anv_DestroyFence(
    VkDevice _device,
    VkFence _fence,
    const VkAllocationCallbacks* pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (!fence)
      return;

   assert(fence->bo.map == fence);
   anv_bo_pool_free(&device->batch_bo_pool, &fence->bo);
}

VkResult anv_ResetFences(
    VkDevice _device,
    uint32_t fenceCount,
    const VkFence* pFences)
{
   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      fence->state = ANV_FENCE_STATE_RESET;
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceStatus(
    VkDevice _device,
    VkFence _fence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (unlikely(device->lost))
      return VK_ERROR_DEVICE_LOST;

   switch (fence->state) {
   case ANV_FENCE_STATE_RESET:
      /* If it hasn't even been sent off to the GPU yet, it's not ready */
      return VK_NOT_READY;

   case ANV_FENCE_STATE_SIGNALED:
      /* It's been signaled, return success */
      return VK_SUCCESS;

   case ANV_FENCE_STATE_SUBMITTED: {
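      /* The BO going idle is what signals the fence: anv_device_bo_busy
       * should return VK_SUCCESS once the fence batch has retired and
       * VK_NOT_READY (or a device-lost error) while it is still pending.
       */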
      VkResult result = anv_device_bo_busy(device, &fence->bo);
      if (result == VK_SUCCESS) {
         fence->state = ANV_FENCE_STATE_SIGNALED;
         return VK_SUCCESS;
      } else {
         return result;
      }
   }
   default:
      unreachable("Invalid fence status");
   }
}

#define NSEC_PER_SEC 1000000000
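/* Maximum value representable in a signed integer type of the given size;
 * used below to keep the computed tv_sec from overflowing on systems where
 * time_t is 32 bits.
 */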
#define INT_TYPE_MAX(type) ((1ull << (sizeof(type) * 8 - 1)) - 1)

VkResult anv_WaitForFences(
    VkDevice _device,
    uint32_t fenceCount,
    const VkFence* pFences,
    VkBool32 waitAll,
    uint64_t _timeout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   int ret;

   if (unlikely(device->lost))
      return VK_ERROR_DEVICE_LOST;

   /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is supposed
    * to block indefinitely for timeouts <= 0. Unfortunately, this was
    * broken for a couple of kernel releases. Since there's no way to know
    * whether or not the kernel we're using is one of the broken ones, the
    * best we can do is to clamp the timeout to INT64_MAX. This limits the
    * maximum timeout from 584 years to 292 years - likely not a big deal.
    */
   int64_t timeout = MIN2(_timeout, INT64_MAX);

   VkResult result = VK_SUCCESS;
   uint32_t pending_fences = fenceCount;
   while (pending_fences) {
      pending_fences = 0;
      bool signaled_fences = false;
      for (uint32_t i = 0; i < fenceCount; i++) {
         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
         switch (fence->state) {
         case ANV_FENCE_STATE_RESET:
            /* This fence hasn't been submitted yet, we'll catch it the next
             * time around. Yes, this may mean we dead-loop but, short of
             * lots of locking and a condition variable, there's not much
             * that we can do about that.
             */
            pending_fences++;
            continue;

         case ANV_FENCE_STATE_SIGNALED:
            /* This fence is not pending. If waitAll isn't set, we can
             * return early. Otherwise, we have to keep going.
             */
            if (!waitAll) {
               result = VK_SUCCESS;
               goto done;
            }
            continue;

         case ANV_FENCE_STATE_SUBMITTED:
            /* These are the fences we really care about. Go ahead and wait
             * on each one until we hit a timeout.
             */
            result = anv_device_wait(device, &fence->bo, timeout);
            switch (result) {
            case VK_SUCCESS:
               fence->state = ANV_FENCE_STATE_SIGNALED;
               signaled_fences = true;
               if (!waitAll)
                  goto done;
               break;

            case VK_TIMEOUT:
               goto done;

            default:
               return result;
            }
         }
      }

      if (pending_fences && !signaled_fences) {
         /* If we've hit this then someone decided to vkWaitForFences before
          * they've actually submitted any of them to a queue. This is a
          * fairly pessimal case, so it's ok to lock here and use a standard
          * pthreads condition variable.
          */
         pthread_mutex_lock(&device->mutex);

         /* It's possible that some of the fences have changed state since
          * the last time we checked. Now that we have the lock, check for
          * pending fences again and don't wait if anything has changed.
          */
         uint32_t now_pending_fences = 0;
         for (uint32_t i = 0; i < fenceCount; i++) {
            ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
            if (fence->state == ANV_FENCE_STATE_RESET)
               now_pending_fences++;
         }
         assert(now_pending_fences <= pending_fences);

         if (now_pending_fences == pending_fences) {
            struct timespec before;
            clock_gettime(CLOCK_MONOTONIC, &before);

            uint32_t abs_nsec = before.tv_nsec + timeout % NSEC_PER_SEC;
            uint64_t abs_sec = before.tv_sec + (abs_nsec / NSEC_PER_SEC) +
                               (timeout / NSEC_PER_SEC);
            abs_nsec %= NSEC_PER_SEC;

            /* Avoid roll-over in tv_sec on 32-bit systems if the user
             * provided timeout is UINT64_MAX
             */
            struct timespec abstime;
            abstime.tv_nsec = abs_nsec;
            abstime.tv_sec = MIN2(abs_sec, INT_TYPE_MAX(abstime.tv_sec));

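            /* This relies on device->queue_submit having been created with
             * a CLOCK_MONOTONIC condattr (assumed to happen at device
             * creation), so the absolute time computed above is in the same
             * time base that pthread_cond_timedwait() uses.
             */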
            ret = pthread_cond_timedwait(&device->queue_submit,
                                         &device->mutex, &abstime);
            assert(ret != EINVAL);

            struct timespec after;
            clock_gettime(CLOCK_MONOTONIC, &after);
            uint64_t time_elapsed =
               ((uint64_t)after.tv_sec * NSEC_PER_SEC + after.tv_nsec) -
               ((uint64_t)before.tv_sec * NSEC_PER_SEC + before.tv_nsec);

            if (time_elapsed >= timeout) {
               pthread_mutex_unlock(&device->mutex);
               result = VK_TIMEOUT;
               goto done;
            }

            timeout -= time_elapsed;
         }

         pthread_mutex_unlock(&device->mutex);
      }
   }

done:
   if (unlikely(device->lost))
      return VK_ERROR_DEVICE_LOST;

   return result;
}

// Queue semaphore functions

VkResult anv_CreateSemaphore(
    VkDevice _device,
    const VkSemaphoreCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkSemaphore* pSemaphore)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_semaphore *semaphore;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO);

   semaphore = vk_alloc2(&device->alloc, pAllocator, sizeof(*semaphore), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (semaphore == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   const VkExportSemaphoreCreateInfoKHX *export =
      vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO_KHX);
   VkExternalSemaphoreHandleTypeFlagsKHX handleTypes =
      export ? export->handleTypes : 0;

   if (handleTypes == 0) {
      /* The DRM execbuffer ioctl always executes in order so long as you
       * stay on the same ring. Since we don't expose the blit engine as a
       * DMA queue, a dummy no-op semaphore is a perfectly valid
       * implementation.
       */
      semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DUMMY;
   } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHX) {
      assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHX);

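      /* The semaphore BO carries no payload; cross-process ordering comes
       * from the kernel's implicit synchronization on the shared BO, which
       * is also why EXEC_OBJECT_ASYNC must not be set on it (see below).
       */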
      semaphore->permanent.type = ANV_SEMAPHORE_TYPE_BO;
      VkResult result = anv_bo_cache_alloc(device, &device->bo_cache,
                                           4096, &semaphore->permanent.bo);
      if (result != VK_SUCCESS) {
         vk_free2(&device->alloc, pAllocator, semaphore);
         return result;
      }

      /* If we're going to use this as a fence, we need to *not* have the
       * EXEC_OBJECT_ASYNC bit set.
       */
      assert(!(semaphore->permanent.bo->flags & EXEC_OBJECT_ASYNC));
   } else {
      assert(!"Unknown handle type");
      vk_free2(&device->alloc, pAllocator, semaphore);
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHX);
   }

   semaphore->temporary.type = ANV_SEMAPHORE_TYPE_NONE;

   *pSemaphore = anv_semaphore_to_handle(semaphore);

   return VK_SUCCESS;
}

static void
anv_semaphore_impl_cleanup(struct anv_device *device,
                           struct anv_semaphore_impl *impl)
{
   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_NONE:
   case ANV_SEMAPHORE_TYPE_DUMMY:
      /* Dummy. Nothing to do */
      return;

   case ANV_SEMAPHORE_TYPE_BO:
      anv_bo_cache_release(device, &device->bo_cache, impl->bo);
      return;
   }

   unreachable("Invalid semaphore type");
}

void anv_DestroySemaphore(
    VkDevice _device,
    VkSemaphore _semaphore,
    const VkAllocationCallbacks* pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, _semaphore);

   if (semaphore == NULL)
      return;

   anv_semaphore_impl_cleanup(device, &semaphore->temporary);
   anv_semaphore_impl_cleanup(device, &semaphore->permanent);

   vk_free2(&device->alloc, pAllocator, semaphore);
}

void anv_GetPhysicalDeviceExternalSemaphorePropertiesKHX(
    VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceExternalSemaphoreInfoKHX* pExternalSemaphoreInfo,
    VkExternalSemaphorePropertiesKHX* pExternalSemaphoreProperties)
{
   switch (pExternalSemaphoreInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHX:
      pExternalSemaphoreProperties->exportFromImportedHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHX;
      pExternalSemaphoreProperties->compatibleHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHX;
      pExternalSemaphoreProperties->externalSemaphoreFeatures =
         VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHX |
         VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHX;
      break;

   default:
      pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
      pExternalSemaphoreProperties->compatibleHandleTypes = 0;
      pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
   }
}

VkResult anv_ImportSemaphoreFdKHX(
    VkDevice _device,
    const VkImportSemaphoreFdInfoKHX* pImportSemaphoreFdInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pImportSemaphoreFdInfo->semaphore);

   switch (pImportSemaphoreFdInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHX: {
      struct anv_bo *bo;
      VkResult result = anv_bo_cache_import(device, &device->bo_cache,
                                            pImportSemaphoreFdInfo->fd, 4096,
                                            &bo);
      if (result != VK_SUCCESS)
         return result;

      /* If we're going to use this as a fence, we need to *not* have the
       * EXEC_OBJECT_ASYNC bit set.
       */
      assert(!(bo->flags & EXEC_OBJECT_ASYNC));

      anv_semaphore_impl_cleanup(device, &semaphore->permanent);

      semaphore->permanent.type = ANV_SEMAPHORE_TYPE_BO;
      semaphore->permanent.bo = bo;

      return VK_SUCCESS;
   }

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHX);
   }
}

VkResult anv_GetSemaphoreFdKHX(
    VkDevice _device,
    VkSemaphore _semaphore,
    VkExternalSemaphoreHandleTypeFlagBitsKHX handleType,
    int* pFd)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, _semaphore);

   switch (semaphore->permanent.type) {
   case ANV_SEMAPHORE_TYPE_BO:
      return anv_bo_cache_export(device, &device->bo_cache,
                                 semaphore->permanent.bo, pFd);

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHX);
   }

   return VK_SUCCESS;
}