/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * This file implements VkQueue, VkFence, and VkSemaphore
 */

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

#include "anv_private.h"
#include "vk_util.h"

#include "genxml/gen7_pack.h"

uint64_t anv_gettime_ns(void)
{
   struct timespec current;
   clock_gettime(CLOCK_MONOTONIC, &current);
   return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
}

uint64_t anv_get_absolute_timeout(uint64_t timeout)
{
   if (timeout == 0)
      return 0;
   uint64_t current_time = anv_gettime_ns();
   uint64_t max_timeout = (uint64_t) INT64_MAX - current_time;

   timeout = MIN2(max_timeout, timeout);

   return (current_time + timeout);
}

static int64_t anv_get_relative_timeout(uint64_t abs_timeout)
{
   uint64_t now = anv_gettime_ns();

   /* We don't want negative timeouts.
    *
    * DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is supposed
    * to block indefinitely for timeouts < 0.  Unfortunately, this was
    * broken for a couple of kernel releases.  Since there's no way to know
    * whether or not the kernel we're using is one of the broken ones, the
    * best we can do is to clamp the timeout to INT64_MAX.  This limits the
    * maximum timeout from 584 years to 292 years - likely not a big deal.
    */
   if (abs_timeout < now)
      return 0;

   uint64_t rel_timeout = abs_timeout - now;
   if (rel_timeout > (uint64_t) INT64_MAX)
      rel_timeout = INT64_MAX;

   return rel_timeout;
}
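
/* A quick worked example of the two helpers above (illustrative only):
 * a caller passing timeout = UINT64_MAX gets
 * anv_get_absolute_timeout(UINT64_MAX) == now + (INT64_MAX - now) ==
 * INT64_MAX, i.e. "wait forever" is represented by the largest signed
 * deadline.  Converting back, anv_get_relative_timeout(INT64_MAX) yields a
 * relative wait of INT64_MAX - now nanoseconds, so both directions stay
 * within the signed 64-bit range the kernel expects.
 */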

static struct anv_semaphore *anv_semaphore_ref(struct anv_semaphore *semaphore);
static void anv_semaphore_unref(struct anv_device *device, struct anv_semaphore *semaphore);
static void anv_semaphore_impl_cleanup(struct anv_device *device,
                                       struct anv_semaphore_impl *impl);

static void
anv_queue_submit_free(struct anv_device *device,
                      struct anv_queue_submit *submit)
{
   const VkAllocationCallbacks *alloc = submit->alloc;

   for (uint32_t i = 0; i < submit->temporary_semaphore_count; i++)
      anv_semaphore_impl_cleanup(device, &submit->temporary_semaphores[i]);
   for (uint32_t i = 0; i < submit->sync_fd_semaphore_count; i++)
      anv_semaphore_unref(device, submit->sync_fd_semaphores[i]);
   /* Execbuf does not consume the in_fence.  It's our job to close it. */
   if (submit->in_fence != -1)
      close(submit->in_fence);
   if (submit->out_fence != -1)
      close(submit->out_fence);
   vk_free(alloc, submit->fences);
   vk_free(alloc, submit->temporary_semaphores);
   vk_free(alloc, submit->wait_timelines);
   vk_free(alloc, submit->wait_timeline_values);
   vk_free(alloc, submit->signal_timelines);
   vk_free(alloc, submit->signal_timeline_values);
   vk_free(alloc, submit->fence_bos);
   vk_free(alloc, submit);
}

static bool
anv_queue_submit_ready_locked(struct anv_queue_submit *submit)
{
   for (uint32_t i = 0; i < submit->wait_timeline_count; i++) {
      if (submit->wait_timeline_values[i] > submit->wait_timelines[i]->highest_pending)
         return false;
   }

   return true;
}
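
/* For example, a submit that waits on timeline value 5 is only "ready" once
 * some prior submission signaling a value >= 5 has reached the kernel and
 * bumped highest_pending.  Until then it sits on the queue's deferred list
 * (see anv_queue_submit_deferred_locked below).
 */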

static VkResult
anv_timeline_init(struct anv_device *device,
                  struct anv_timeline *timeline,
                  uint64_t initial_value)
{
   timeline->highest_past =
      timeline->highest_pending = initial_value;
   list_inithead(&timeline->points);
   list_inithead(&timeline->free_points);

   return VK_SUCCESS;
}

static void
anv_timeline_finish(struct anv_device *device,
                    struct anv_timeline *timeline)
{
   list_for_each_entry_safe(struct anv_timeline_point, point,
                            &timeline->free_points, link) {
      list_del(&point->link);
      anv_device_release_bo(device, point->bo);
      vk_free(&device->alloc, point);
   }
   list_for_each_entry_safe(struct anv_timeline_point, point,
                            &timeline->points, link) {
      list_del(&point->link);
      anv_device_release_bo(device, point->bo);
      vk_free(&device->alloc, point);
   }
}

static VkResult
anv_timeline_add_point_locked(struct anv_device *device,
                              struct anv_timeline *timeline,
                              uint64_t value,
                              struct anv_timeline_point **point)
{
   VkResult result = VK_SUCCESS;

   if (list_is_empty(&timeline->free_points)) {
      *point =
         vk_zalloc(&device->alloc, sizeof(**point),
                   8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (!(*point))
         result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      if (result == VK_SUCCESS) {
         result = anv_device_alloc_bo(device, 4096,
                                      ANV_BO_ALLOC_EXTERNAL |
                                      ANV_BO_ALLOC_IMPLICIT_SYNC,
                                      0 /* explicit_address */,
                                      &(*point)->bo);
         if (result != VK_SUCCESS)
            vk_free(&device->alloc, *point);
      }
   } else {
      *point = list_first_entry(&timeline->free_points,
                                struct anv_timeline_point, link);
      list_del(&(*point)->link);
   }

   if (result == VK_SUCCESS) {
      (*point)->serial = value;
      list_addtail(&(*point)->link, &timeline->points);
   }

   return result;
}

static VkResult
anv_timeline_gc_locked(struct anv_device *device,
                       struct anv_timeline *timeline)
{
   list_for_each_entry_safe(struct anv_timeline_point, point,
                            &timeline->points, link) {
      /* timeline->highest_pending is only incremented once submission has
       * happened.  If this point has a greater serial, it means the point
       * hasn't been submitted yet.
       */
      if (point->serial > timeline->highest_pending)
         return VK_SUCCESS;

      /* If someone is waiting on this time point, consider it busy and don't
       * try to recycle it.  There's a slim possibility that it's no longer
       * busy by the time we look at it but we would be recycling it out from
       * under a waiter and that can lead to weird races.
       *
       * We walk the list in-order so if this time point is still busy, so is
       * every following time point.
       */
      assert(point->waiting >= 0);
      if (point->waiting)
         return VK_SUCCESS;

      /* Garbage collect any signaled point. */
      VkResult result = anv_device_bo_busy(device, point->bo);
      if (result == VK_NOT_READY) {
         /* We walk the list in-order so if this time point is still busy,
          * so is every following time point.
          */
         return VK_SUCCESS;
      } else if (result != VK_SUCCESS) {
         return result;
      }

      assert(timeline->highest_past < point->serial);
      timeline->highest_past = point->serial;

      list_del(&point->link);
      list_add(&point->link, &timeline->free_points);
   }

   return VK_SUCCESS;
}
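
/* To summarize the point lifecycle: anv_timeline_add_point_locked() moves a
 * point (a 4KiB BO tagged with a serial) onto timeline->points when a signal
 * operation is submitted; once the GPU is done with the BO and nobody is
 * waiting on it, the GC above advances highest_past and recycles the point
 * onto timeline->free_points for the next signal operation.
 */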

static VkResult anv_queue_submit_add_fence_bo(struct anv_queue_submit *submit,
                                              struct anv_bo *bo,
                                              bool signal);

static VkResult
anv_queue_submit_timeline_locked(struct anv_queue *queue,
                                 struct anv_queue_submit *submit)
{
   VkResult result;

   for (uint32_t i = 0; i < submit->wait_timeline_count; i++) {
      struct anv_timeline *timeline = submit->wait_timelines[i];
      uint64_t wait_value = submit->wait_timeline_values[i];

      if (timeline->highest_past >= wait_value)
         continue;

      list_for_each_entry(struct anv_timeline_point, point, &timeline->points, link) {
         if (point->serial < wait_value)
            continue;
         result = anv_queue_submit_add_fence_bo(submit, point->bo, false);
         if (result != VK_SUCCESS)
            return result;
         break;
      }
   }
   for (uint32_t i = 0; i < submit->signal_timeline_count; i++) {
      struct anv_timeline *timeline = submit->signal_timelines[i];
      uint64_t signal_value = submit->signal_timeline_values[i];
      struct anv_timeline_point *point;

      result = anv_timeline_add_point_locked(queue->device, timeline,
                                             signal_value, &point);
      if (result != VK_SUCCESS)
         return result;

      result = anv_queue_submit_add_fence_bo(submit, point->bo, true);
      if (result != VK_SUCCESS)
         return result;
   }

   result = anv_queue_execbuf_locked(queue, submit);

   if (result == VK_SUCCESS) {
      /* Update the pending values in the timeline objects. */
      for (uint32_t i = 0; i < submit->signal_timeline_count; i++) {
         struct anv_timeline *timeline = submit->signal_timelines[i];
         uint64_t signal_value = submit->signal_timeline_values[i];

         assert(signal_value > timeline->highest_pending);
         timeline->highest_pending = signal_value;
      }

      /* Update signaled semaphores backed by syncfd. */
      for (uint32_t i = 0; i < submit->sync_fd_semaphore_count; i++) {
         struct anv_semaphore *semaphore = submit->sync_fd_semaphores[i];
         /* Out fences can't have temporary state because that would imply
          * that we imported a sync file and are trying to signal it.
          */
         assert(semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE);
         struct anv_semaphore_impl *impl = &semaphore->permanent;

         assert(impl->type == ANV_SEMAPHORE_TYPE_SYNC_FILE);
         impl->fd = dup(submit->out_fence);
      }
   } else {
      /* Unblock any waiter by signaling the points, the application will get
       * a device lost error code.
       */
      for (uint32_t i = 0; i < submit->signal_timeline_count; i++) {
         struct anv_timeline *timeline = submit->signal_timelines[i];
         uint64_t signal_value = submit->signal_timeline_values[i];

         assert(signal_value > timeline->highest_pending);
         timeline->highest_past = timeline->highest_pending = signal_value;
      }
   }

   return result;
}

static VkResult
anv_queue_submit_deferred_locked(struct anv_queue *queue, uint32_t *advance)
{
   VkResult result = VK_SUCCESS;

   /* Go through all the queued submissions and submit them until we find
    * one that's waiting on a point that hasn't materialized yet.
    */
   list_for_each_entry_safe(struct anv_queue_submit, submit,
                            &queue->queued_submits, link) {
      if (!anv_queue_submit_ready_locked(submit))
         break;

      (*advance)++;
      list_del(&submit->link);

      result = anv_queue_submit_timeline_locked(queue, submit);

      anv_queue_submit_free(queue->device, submit);

      if (result != VK_SUCCESS)
         break;
   }

   return result;
}

static VkResult
anv_device_submit_deferred_locked(struct anv_device *device)
{
   uint32_t advance = 0;
   return anv_queue_submit_deferred_locked(&device->queue, &advance);
}

static VkResult
_anv_queue_submit(struct anv_queue *queue, struct anv_queue_submit **_submit)
{
   struct anv_queue_submit *submit = *_submit;

   /* Wait-before-signal behavior means we might keep the anv_queue_submit
    * object alive a bit longer, so transfer ownership to the anv_queue.
    */
   *_submit = NULL;

   pthread_mutex_lock(&queue->device->mutex);
   list_addtail(&submit->link, &queue->queued_submits);
   VkResult result = anv_device_submit_deferred_locked(queue->device);
   pthread_mutex_unlock(&queue->device->mutex);
   return result;
}
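
/* Concretely, if the application submits batch A waiting on timeline value
 * 10 before any batch signaling 10 has been queued, A stays on
 * queued_submits here; a later submission that signals 10 re-runs the
 * deferred flush above and drains A.  This is also why the caller's submit
 * pointer is NULLed: once on the list, the queue owns the object and
 * anv_queue_submit_deferred_locked() eventually frees it.
 */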

VkResult
anv_queue_init(struct anv_device *device, struct anv_queue *queue)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->flags = 0;

   list_inithead(&queue->queued_submits);

   return VK_SUCCESS;
}

void
anv_queue_finish(struct anv_queue *queue)
{
}

static VkResult
anv_queue_submit_add_fence_bo(struct anv_queue_submit *submit,
                              struct anv_bo *bo,
                              bool signal)
{
   if (submit->fence_bo_count >= submit->fence_bo_array_length) {
      uint32_t new_len = MAX2(submit->fence_bo_array_length * 2, 64);

      submit->fence_bos =
         vk_realloc(submit->alloc,
                    submit->fence_bos, new_len * sizeof(*submit->fence_bos),
                    8, submit->alloc_scope);
      if (submit->fence_bos == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      submit->fence_bo_array_length = new_len;
   }

   /* Take advantage of the fact that anv_bo objects are allocated with
    * 8-byte alignment, so we can use the lowest bit to store whether this
    * is a BO we need to signal.
    */
   submit->fence_bos[submit->fence_bo_count++] = anv_pack_ptr(bo, 1, signal);

   return VK_SUCCESS;
}
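
/* A sketch of the packing above (assuming the matching anv_unpack_ptr()
 * helper from anv_private.h as the decoding side):
 *
 *    uintptr_t packed = anv_pack_ptr(bo, 1, signal);   // signal in bit 0
 *    int signal_flag;
 *    struct anv_bo *unpacked = anv_unpack_ptr(packed, 1, &signal_flag);
 *
 * The pointer survives the round-trip because its low bit is always zero
 * for 8-byte-aligned allocations.
 */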

static VkResult
anv_queue_submit_add_syncobj(struct anv_queue_submit* submit,
                             struct anv_device *device,
                             uint32_t handle, uint32_t flags)
{
   assert(flags != 0);

   if (submit->fence_count >= submit->fence_array_length) {
      uint32_t new_len = MAX2(submit->fence_array_length * 2, 64);

      submit->fences =
         vk_realloc(submit->alloc,
                    submit->fences, new_len * sizeof(*submit->fences),
                    8, submit->alloc_scope);
      if (submit->fences == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      submit->fence_array_length = new_len;
   }

   submit->fences[submit->fence_count++] = (struct drm_i915_gem_exec_fence) {
      .handle = handle,
      .flags = flags,
   };

   return VK_SUCCESS;
}

static VkResult
anv_queue_submit_add_sync_fd_fence(struct anv_queue_submit *submit,
                                   struct anv_semaphore *semaphore)
{
   if (submit->sync_fd_semaphore_count >= submit->sync_fd_semaphore_array_length) {
      uint32_t new_len = MAX2(submit->sync_fd_semaphore_array_length * 2, 64);
      struct anv_semaphore **new_semaphores =
         vk_realloc(submit->alloc, submit->sync_fd_semaphores,
                    new_len * sizeof(*submit->sync_fd_semaphores), 8,
                    submit->alloc_scope);
      if (new_semaphores == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      submit->sync_fd_semaphores = new_semaphores;
   }

   submit->sync_fd_semaphores[submit->sync_fd_semaphore_count++] =
      anv_semaphore_ref(semaphore);
   submit->need_out_fence = true;

   return VK_SUCCESS;
}

static VkResult
anv_queue_submit_add_timeline_wait(struct anv_queue_submit* submit,
                                   struct anv_device *device,
                                   struct anv_timeline *timeline,
                                   uint64_t value)
{
   if (submit->wait_timeline_count >= submit->wait_timeline_array_length) {
      uint32_t new_len = MAX2(submit->wait_timeline_array_length * 2, 64);

      submit->wait_timelines =
         vk_realloc(submit->alloc,
                    submit->wait_timelines, new_len * sizeof(*submit->wait_timelines),
                    8, submit->alloc_scope);
      if (submit->wait_timelines == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      submit->wait_timeline_values =
         vk_realloc(submit->alloc,
                    submit->wait_timeline_values, new_len * sizeof(*submit->wait_timeline_values),
                    8, submit->alloc_scope);
      if (submit->wait_timeline_values == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      submit->wait_timeline_array_length = new_len;
   }

   submit->wait_timelines[submit->wait_timeline_count] = timeline;
   submit->wait_timeline_values[submit->wait_timeline_count] = value;

   submit->wait_timeline_count++;

   return VK_SUCCESS;
}

static VkResult
anv_queue_submit_add_timeline_signal(struct anv_queue_submit* submit,
                                     struct anv_device *device,
                                     struct anv_timeline *timeline,
                                     uint64_t value)
{
   assert(timeline->highest_pending < value);

   if (submit->signal_timeline_count >= submit->signal_timeline_array_length) {
      uint32_t new_len = MAX2(submit->signal_timeline_array_length * 2, 64);

      submit->signal_timelines =
         vk_realloc(submit->alloc,
                    submit->signal_timelines, new_len * sizeof(*submit->signal_timelines),
                    8, submit->alloc_scope);
      if (submit->signal_timelines == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      submit->signal_timeline_values =
         vk_realloc(submit->alloc,
                    submit->signal_timeline_values, new_len * sizeof(*submit->signal_timeline_values),
                    8, submit->alloc_scope);
      if (submit->signal_timeline_values == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      submit->signal_timeline_array_length = new_len;
   }

   submit->signal_timelines[submit->signal_timeline_count] = timeline;
   submit->signal_timeline_values[submit->signal_timeline_count] = value;

   submit->signal_timeline_count++;

   return VK_SUCCESS;
}

static struct anv_queue_submit *
anv_queue_submit_alloc(struct anv_device *device)
{
   const VkAllocationCallbacks *alloc = &device->alloc;
   VkSystemAllocationScope alloc_scope = VK_SYSTEM_ALLOCATION_SCOPE_DEVICE;

   struct anv_queue_submit *submit = vk_zalloc(alloc, sizeof(*submit), 8, alloc_scope);
   if (!submit)
      return NULL;

   submit->alloc = alloc;
   submit->alloc_scope = alloc_scope;
   submit->in_fence = -1;
   submit->out_fence = -1;

   return submit;
}

VkResult
anv_queue_submit_simple_batch(struct anv_queue *queue,
                              struct anv_batch *batch)
{
   struct anv_device *device = queue->device;
   struct anv_queue_submit *submit = anv_queue_submit_alloc(device);
   if (!submit)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   bool has_syncobj_wait = device->instance->physicalDevice.has_syncobj_wait;
   VkResult result;
   uint32_t syncobj;
   struct anv_bo *batch_bo, *sync_bo;

   if (has_syncobj_wait) {
      syncobj = anv_gem_syncobj_create(device, 0);
      if (!syncobj) {
         result = vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
         goto err_free_submit;
      }

      result = anv_queue_submit_add_syncobj(submit, device, syncobj,
                                            I915_EXEC_FENCE_SIGNAL);
   } else {
      result = anv_device_alloc_bo(device, 4096,
                                   ANV_BO_ALLOC_EXTERNAL |
                                   ANV_BO_ALLOC_IMPLICIT_SYNC,
                                   0 /* explicit_address */,
                                   &sync_bo);
      if (result != VK_SUCCESS)
         goto err_free_submit;

      result = anv_queue_submit_add_fence_bo(submit, sync_bo, true /* signal */);
   }

   if (result != VK_SUCCESS)
      goto err_destroy_sync_primitive;

   if (batch) {
      uint32_t size = align_u32(batch->next - batch->start, 8);
      result = anv_bo_pool_alloc(&device->batch_bo_pool, size, &batch_bo);
      if (result != VK_SUCCESS)
         goto err_destroy_sync_primitive;

      memcpy(batch_bo->map, batch->start, size);
      if (!device->info.has_llc)
         gen_flush_range(batch_bo->map, size);

      submit->simple_bo = batch_bo;
      submit->simple_bo_size = size;
   }

   result = _anv_queue_submit(queue, &submit);

   if (result == VK_SUCCESS) {
      if (has_syncobj_wait) {
         if (anv_gem_syncobj_wait(device, &syncobj, 1,
                                  anv_get_absolute_timeout(INT64_MAX), true))
            result = anv_device_set_lost(device, "anv_gem_syncobj_wait failed: %m");
         anv_gem_syncobj_destroy(device, syncobj);
      } else {
         result = anv_device_wait(device, sync_bo,
                                  anv_get_relative_timeout(INT64_MAX));
         anv_device_release_bo(device, sync_bo);
      }
   }

   if (batch)
      anv_bo_pool_free(&device->batch_bo_pool, batch_bo);

   if (submit)
      anv_queue_submit_free(device, submit);

   return result;

 err_destroy_sync_primitive:
   if (has_syncobj_wait)
      anv_gem_syncobj_destroy(device, syncobj);
   else
      anv_device_release_bo(device, sync_bo);
 err_free_submit:
   if (submit)
      anv_queue_submit_free(device, submit);

   return result;
}

/* Transfer ownership of temporary semaphores from the VkSemaphore object to
 * the anv_queue_submit object.  Those temporary semaphores are then freed in
 * anv_queue_submit_free() once the driver is finished with them.
 */
static VkResult
maybe_transfer_temporary_semaphore(struct anv_queue_submit *submit,
                                   struct anv_semaphore *semaphore,
                                   struct anv_semaphore_impl **out_impl)
{
   struct anv_semaphore_impl *impl = &semaphore->temporary;

   if (impl->type == ANV_SEMAPHORE_TYPE_NONE) {
      *out_impl = &semaphore->permanent;
      return VK_SUCCESS;
   }

   /* BO backed timeline semaphores cannot be temporary. */
   assert(impl->type != ANV_SEMAPHORE_TYPE_TIMELINE);

   /*
    * There is a requirement to reset semaphores to their permanent state
    * after submission.  From the Vulkan 1.0.53 spec:
    *
    *    "If the import is temporary, the implementation must restore the
    *    semaphore to its prior permanent state after submitting the next
    *    semaphore wait operation."
    *
    * In the case where we defer the actual submission because of the
    * wait-before-submit behavior required for timeline semaphores, we need
    * to make copies of the temporary syncobjs to ensure they stay alive
    * until we do the actual execbuffer ioctl.
    */
   if (submit->temporary_semaphore_count >= submit->temporary_semaphore_array_length) {
      uint32_t new_len = MAX2(submit->temporary_semaphore_array_length * 2, 8);
      /* Make sure that if the realloc fails, we still have the old semaphore
       * array around to properly clean things up on failure.
       */
      struct anv_semaphore_impl *new_array =
         vk_realloc(submit->alloc,
                    submit->temporary_semaphores,
                    new_len * sizeof(*submit->temporary_semaphores),
                    8, submit->alloc_scope);
      if (new_array == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      submit->temporary_semaphores = new_array;
      submit->temporary_semaphore_array_length = new_len;
   }

   /* Copy anv_semaphore_impl into anv_queue_submit. */
   submit->temporary_semaphores[submit->temporary_semaphore_count++] = *impl;
   *out_impl = &submit->temporary_semaphores[submit->temporary_semaphore_count - 1];

   /* Clear the incoming semaphore */
   impl->type = ANV_SEMAPHORE_TYPE_NONE;

   return VK_SUCCESS;
}
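
/* For instance, after a vkImportSemaphoreFdKHR() call with
 * VK_SEMAPHORE_IMPORT_TEMPORARY_BIT, the first wait on that semaphore goes
 * through the copy stashed in the submit above, while the VkSemaphore itself
 * is immediately restored to its permanent payload, exactly as the spec
 * language quoted above requires.
 */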

static VkResult
anv_queue_submit(struct anv_queue *queue,
                 struct anv_cmd_buffer *cmd_buffer,
                 const VkSemaphore *in_semaphores,
                 const uint64_t *in_values,
                 uint32_t num_in_semaphores,
                 const VkSemaphore *out_semaphores,
                 const uint64_t *out_values,
                 uint32_t num_out_semaphores,
                 VkFence _fence)
{
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   struct anv_device *device = queue->device;
   UNUSED struct anv_physical_device *pdevice = &device->instance->physicalDevice;
   struct anv_queue_submit *submit = anv_queue_submit_alloc(device);
   if (!submit)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   submit->cmd_buffer = cmd_buffer;

   VkResult result = VK_SUCCESS;

   for (uint32_t i = 0; i < num_in_semaphores; i++) {
      ANV_FROM_HANDLE(anv_semaphore, semaphore, in_semaphores[i]);
      struct anv_semaphore_impl *impl;

      result = maybe_transfer_temporary_semaphore(submit, semaphore, &impl);
      if (result != VK_SUCCESS)
         goto error;

      switch (impl->type) {
      case ANV_SEMAPHORE_TYPE_BO:
         assert(!pdevice->has_syncobj);
         result = anv_queue_submit_add_fence_bo(submit, impl->bo, false /* signal */);
         if (result != VK_SUCCESS)
            goto error;
         break;

      case ANV_SEMAPHORE_TYPE_SYNC_FILE:
         assert(!pdevice->has_syncobj);
         if (submit->in_fence == -1) {
            submit->in_fence = impl->fd;
            if (submit->in_fence == -1) {
               result = vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
               goto error;
            }
            impl->fd = -1;
         } else {
            int merge = anv_gem_sync_file_merge(device, submit->in_fence, impl->fd);
            if (merge == -1) {
               result = vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
               goto error;
            }
            close(impl->fd);
            close(submit->in_fence);
            impl->fd = -1;
            submit->in_fence = merge;
         }
         break;

      case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ: {
         result = anv_queue_submit_add_syncobj(submit, device,
                                               impl->syncobj,
                                               I915_EXEC_FENCE_WAIT);
         if (result != VK_SUCCESS)
            goto error;
         break;
      }

      case ANV_SEMAPHORE_TYPE_TIMELINE:
         result = anv_queue_submit_add_timeline_wait(submit, device,
                                                     &impl->timeline,
                                                     in_values ? in_values[i] : 0);
         if (result != VK_SUCCESS)
            goto error;
         break;

      default:
         break;
      }
   }

   for (uint32_t i = 0; i < num_out_semaphores; i++) {
      ANV_FROM_HANDLE(anv_semaphore, semaphore, out_semaphores[i]);

      /* Under most circumstances, out fences won't be temporary.  However,
       * the spec does allow it for opaque_fd.  From the Vulkan 1.0.53 spec:
       *
       *    "If the import is temporary, the implementation must restore the
       *    semaphore to its prior permanent state after submitting the next
       *    semaphore wait operation."
       *
       * The spec says nothing whatsoever about signal operations on
       * temporarily imported semaphores so it appears they are allowed.
       * There are also CTS tests that require this to work.
       */
      struct anv_semaphore_impl *impl =
         semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
         &semaphore->temporary : &semaphore->permanent;

      switch (impl->type) {
      case ANV_SEMAPHORE_TYPE_BO:
         assert(!pdevice->has_syncobj);
         result = anv_queue_submit_add_fence_bo(submit, impl->bo, true /* signal */);
         if (result != VK_SUCCESS)
            goto error;
         break;

      case ANV_SEMAPHORE_TYPE_SYNC_FILE:
         assert(!pdevice->has_syncobj);
         result = anv_queue_submit_add_sync_fd_fence(submit, semaphore);
         if (result != VK_SUCCESS)
            goto error;
         break;

      case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ: {
         result = anv_queue_submit_add_syncobj(submit, device, impl->syncobj,
                                               I915_EXEC_FENCE_SIGNAL);
         if (result != VK_SUCCESS)
            goto error;
         break;
      }

      case ANV_SEMAPHORE_TYPE_TIMELINE:
         result = anv_queue_submit_add_timeline_signal(submit, device,
                                                       &impl->timeline,
                                                       out_values ? out_values[i] : 0);
         if (result != VK_SUCCESS)
            goto error;
         break;

      default:
         break;
      }
   }

   if (fence) {
      /* Under most circumstances, out fences won't be temporary.  However,
       * the spec does allow it for opaque_fd.  From the Vulkan 1.0.53 spec:
       *
       *    "If the import is temporary, the implementation must restore the
       *    semaphore to its prior permanent state after submitting the next
       *    semaphore wait operation."
       *
       * The spec says nothing whatsoever about signal operations on
       * temporarily imported semaphores so it appears they are allowed.
       * There are also CTS tests that require this to work.
       */
      struct anv_fence_impl *impl =
         fence->temporary.type != ANV_FENCE_TYPE_NONE ?
         &fence->temporary : &fence->permanent;

      switch (impl->type) {
      case ANV_FENCE_TYPE_BO:
         result = anv_queue_submit_add_fence_bo(submit, impl->bo.bo, true /* signal */);
         if (result != VK_SUCCESS)
            goto error;
         break;

      case ANV_FENCE_TYPE_SYNCOBJ: {
         /*
          * For the same reason we reset the signaled binary syncobj above,
          * also reset the fence's syncobj so that they don't contain a
          * signaled dma-fence.
          */
         result = anv_queue_submit_add_syncobj(submit, device, impl->syncobj,
                                               I915_EXEC_FENCE_SIGNAL);
         if (result != VK_SUCCESS)
            goto error;
         break;
      }

      default:
         unreachable("Invalid fence type");
      }
   }

   result = _anv_queue_submit(queue, &submit);
   if (result != VK_SUCCESS)
      goto error;

   if (fence && fence->permanent.type == ANV_FENCE_TYPE_BO) {
      /* BO fences can't be shared, so they can't be temporary. */
      assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);

      /* Once the execbuf has returned, we need to set the fence state to
       * SUBMITTED.  We can't do this before calling execbuf because
       * anv_GetFenceStatus does not take the global device lock before
       * checking fence->state.
       *
       * We set the fence state to SUBMITTED regardless of whether or not the
       * execbuf succeeds because we need to ensure that vkWaitForFences() and
       * vkGetFenceStatus() return a valid result (VK_ERROR_DEVICE_LOST or
       * VK_SUCCESS) in a finite amount of time even if execbuf fails.
       */
      fence->permanent.bo.state = ANV_BO_FENCE_STATE_SUBMITTED;
   }

 error:
   if (submit)
      anv_queue_submit_free(device, submit);

   return result;
}
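
/* anv_QueueSubmit() below fans a single VkSubmitInfo out into one
 * anv_queue_submit() call per command buffer: with three command buffers,
 * only the first execbuf carries the wait semaphores, only the third carries
 * the signal semaphores and the fence, and the middle one carries neither.
 */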

VkResult anv_QueueSubmit(
    VkQueue                                     _queue,
    uint32_t                                    submitCount,
    const VkSubmitInfo*                         pSubmits,
    VkFence                                     fence)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);

   /* Query for device status prior to submitting.  Technically, we don't
    * need to do this.  However, if we have a client that's submitting piles
    * of garbage, we would rather break as early as possible to keep GPU
    * hangs contained.  If we don't check here, we'll either be waiting for
    * the kernel to kick us or we'll have to wait until the client waits on
    * a fence before we actually know whether or not we've hung.
    */
   VkResult result = anv_device_query_status(queue->device);
   if (result != VK_SUCCESS)
      return result;

   if (fence && submitCount == 0) {
      /* If we don't have any command buffers, we need to submit a dummy
       * batch to give GEM something to wait on.  We could, potentially,
       * come up with something more efficient but this shouldn't be a
       * common case.
       */
      result = anv_queue_submit(queue, NULL, NULL, NULL, 0, NULL, NULL, 0, fence);
      goto out;
   }

   for (uint32_t i = 0; i < submitCount; i++) {
      /* Fence for this submit.  NULL for all but the last one */
      VkFence submit_fence = (i == submitCount - 1) ? fence : VK_NULL_HANDLE;

      const VkTimelineSemaphoreSubmitInfoKHR *timeline_info =
         vk_find_struct_const(pSubmits[i].pNext,
                              TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR);
      const uint64_t *wait_values =
         timeline_info && timeline_info->waitSemaphoreValueCount ?
         timeline_info->pWaitSemaphoreValues : NULL;
      const uint64_t *signal_values =
         timeline_info && timeline_info->signalSemaphoreValueCount ?
         timeline_info->pSignalSemaphoreValues : NULL;

      if (pSubmits[i].commandBufferCount == 0) {
         /* If we don't have any command buffers, we need to submit a dummy
          * batch to give GEM something to wait on.  We could, potentially,
          * come up with something more efficient but this shouldn't be a
          * common case.
          */
         result = anv_queue_submit(queue, NULL,
                                   pSubmits[i].pWaitSemaphores,
                                   wait_values,
                                   pSubmits[i].waitSemaphoreCount,
                                   pSubmits[i].pSignalSemaphores,
                                   signal_values,
                                   pSubmits[i].signalSemaphoreCount,
                                   submit_fence);
         if (result != VK_SUCCESS)
            goto out;

         continue;
      }

      for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
         ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer,
                         pSubmits[i].pCommandBuffers[j]);
         assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
         assert(!anv_batch_has_error(&cmd_buffer->batch));

         /* Fence for this execbuf.  NULL for all but the last one */
         VkFence execbuf_fence =
            (j == pSubmits[i].commandBufferCount - 1) ?
            submit_fence : VK_NULL_HANDLE;

         const VkSemaphore *in_semaphores = NULL, *out_semaphores = NULL;
         const uint64_t *in_values = NULL, *out_values = NULL;
         uint32_t num_in_semaphores = 0, num_out_semaphores = 0;
         if (j == 0) {
            /* Only the first batch gets the in semaphores */
            in_semaphores = pSubmits[i].pWaitSemaphores;
            in_values = wait_values;
            num_in_semaphores = pSubmits[i].waitSemaphoreCount;
         }

         if (j == pSubmits[i].commandBufferCount - 1) {
            /* Only the last batch gets the out semaphores */
            out_semaphores = pSubmits[i].pSignalSemaphores;
            out_values = signal_values;
            num_out_semaphores = pSubmits[i].signalSemaphoreCount;
         }

         result = anv_queue_submit(queue, cmd_buffer,
                                   in_semaphores, in_values, num_in_semaphores,
                                   out_semaphores, out_values, num_out_semaphores,
                                   execbuf_fence);
         if (result != VK_SUCCESS)
            goto out;
      }
   }

 out:
   if (result != VK_SUCCESS && result != VK_ERROR_DEVICE_LOST) {
      /* In the case that something has gone wrong we may end up with an
       * inconsistent state from which it may not be trivial to recover.
       * For example, we might have computed address relocations and
       * any future attempt to re-submit this job will need to know about
       * this and avoid computing relocation addresses again.
       *
       * To avoid this sort of issue, we assume that if something was
       * wrong during submission we must already be in a really bad
       * situation anyway (such as being out of memory) and return
       * VK_ERROR_DEVICE_LOST to ensure that clients do not attempt to
       * submit the same job again to this device.
       *
       * We skip doing this on VK_ERROR_DEVICE_LOST because
       * anv_device_set_lost() would have been called already by a callee of
       * anv_queue_submit().
       */
      result = anv_device_set_lost(queue->device, "vkQueueSubmit() failed");
   }

   return result;
}

VkResult anv_QueueWaitIdle(
    VkQueue                                     _queue)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);

   if (anv_device_is_lost(queue->device))
      return VK_ERROR_DEVICE_LOST;

   return anv_queue_submit_simple_batch(queue, NULL);
}

VkResult anv_CreateFence(
    VkDevice                                    _device,
    const VkFenceCreateInfo*                    pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkFence*                                    pFence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_fence *fence;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);

   fence = vk_zalloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (fence == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (device->instance->physicalDevice.has_syncobj_wait) {
      fence->permanent.type = ANV_FENCE_TYPE_SYNCOBJ;

      uint32_t create_flags = 0;
      if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT)
         create_flags |= DRM_SYNCOBJ_CREATE_SIGNALED;

      fence->permanent.syncobj = anv_gem_syncobj_create(device, create_flags);
      if (!fence->permanent.syncobj)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   } else {
      fence->permanent.type = ANV_FENCE_TYPE_BO;

      VkResult result = anv_bo_pool_alloc(&device->batch_bo_pool, 4096,
                                          &fence->permanent.bo.bo);
      if (result != VK_SUCCESS)
         return result;

      if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
         fence->permanent.bo.state = ANV_BO_FENCE_STATE_SIGNALED;
      } else {
         fence->permanent.bo.state = ANV_BO_FENCE_STATE_RESET;
      }
   }

   *pFence = anv_fence_to_handle(fence);

   return VK_SUCCESS;
}

static void
anv_fence_impl_cleanup(struct anv_device *device,
                       struct anv_fence_impl *impl)
{
   switch (impl->type) {
   case ANV_FENCE_TYPE_NONE:
      /* Dummy.  Nothing to do */
      break;

   case ANV_FENCE_TYPE_BO:
      anv_bo_pool_free(&device->batch_bo_pool, impl->bo.bo);
      break;

   case ANV_FENCE_TYPE_SYNCOBJ:
      anv_gem_syncobj_destroy(device, impl->syncobj);
      break;

   case ANV_FENCE_TYPE_WSI:
      impl->fence_wsi->destroy(impl->fence_wsi);
      break;

   default:
      unreachable("Invalid fence type");
   }

   impl->type = ANV_FENCE_TYPE_NONE;
}

void anv_DestroyFence(
    VkDevice                                    _device,
    VkFence                                     _fence,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (!fence)
      return;

   anv_fence_impl_cleanup(device, &fence->temporary);
   anv_fence_impl_cleanup(device, &fence->permanent);

   vk_free2(&device->alloc, pAllocator, fence);
}

VkResult anv_ResetFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);

      /* From the Vulkan 1.0.53 spec:
       *
       *    "If any member of pFences currently has its payload imported with
       *    temporary permanence, that fence’s prior permanent payload is
       *    first restored.  The remaining operations described therefore
       *    operate on the restored payload."
       */
      if (fence->temporary.type != ANV_FENCE_TYPE_NONE)
         anv_fence_impl_cleanup(device, &fence->temporary);

      struct anv_fence_impl *impl = &fence->permanent;

      switch (impl->type) {
      case ANV_FENCE_TYPE_BO:
         impl->bo.state = ANV_BO_FENCE_STATE_RESET;
         break;

      case ANV_FENCE_TYPE_SYNCOBJ:
         anv_gem_syncobj_reset(device, impl->syncobj);
         break;

      default:
         unreachable("Invalid fence type");
      }
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceStatus(
    VkDevice                                    _device,
    VkFence                                     _fence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   struct anv_fence_impl *impl =
      fence->temporary.type != ANV_FENCE_TYPE_NONE ?
      &fence->temporary : &fence->permanent;

   switch (impl->type) {
   case ANV_FENCE_TYPE_BO:
      /* BO fences don't support import/export */
      assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
      switch (impl->bo.state) {
      case ANV_BO_FENCE_STATE_RESET:
         /* If it hasn't even been sent off to the GPU yet, it's not ready */
         return VK_NOT_READY;

      case ANV_BO_FENCE_STATE_SIGNALED:
         /* It's been signaled, return success */
         return VK_SUCCESS;

      case ANV_BO_FENCE_STATE_SUBMITTED: {
         VkResult result = anv_device_bo_busy(device, impl->bo.bo);
         if (result == VK_SUCCESS) {
            impl->bo.state = ANV_BO_FENCE_STATE_SIGNALED;
            return VK_SUCCESS;
         } else {
            return result;
         }
      }
      default:
         unreachable("Invalid fence status");
      }

   case ANV_FENCE_TYPE_SYNCOBJ: {
      int ret = anv_gem_syncobj_wait(device, &impl->syncobj, 1, 0, true);
      if (ret == -1) {
         if (errno == ETIME) {
            return VK_NOT_READY;
         } else {
            /* We don't know the real error. */
            return anv_device_set_lost(device, "drm_syncobj_wait failed: %m");
         }
      } else {
         return VK_SUCCESS;
      }
   }

   default:
      unreachable("Invalid fence type");
   }
}

static VkResult
anv_wait_for_syncobj_fences(struct anv_device *device,
                            uint32_t fenceCount,
                            const VkFence *pFences,
                            bool waitAll,
                            uint64_t abs_timeout_ns)
{
   uint32_t *syncobjs = vk_zalloc(&device->alloc,
                                  sizeof(*syncobjs) * fenceCount, 8,
                                  VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!syncobjs)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      assert(fence->permanent.type == ANV_FENCE_TYPE_SYNCOBJ);

      struct anv_fence_impl *impl =
         fence->temporary.type != ANV_FENCE_TYPE_NONE ?
         &fence->temporary : &fence->permanent;

      assert(impl->type == ANV_FENCE_TYPE_SYNCOBJ);
      syncobjs[i] = impl->syncobj;
   }

   /* The gem_syncobj_wait ioctl may return early due to an inherent
    * limitation in the way it computes timeouts.  Loop until we've actually
    * passed the timeout.
    */
   int ret;
   do {
      ret = anv_gem_syncobj_wait(device, syncobjs, fenceCount,
                                 abs_timeout_ns, waitAll);
   } while (ret == -1 && errno == ETIME && anv_gettime_ns() < abs_timeout_ns);

   vk_free(&device->alloc, syncobjs);

   if (ret == -1) {
      if (errno == ETIME) {
         return VK_TIMEOUT;
      } else {
         /* We don't know the real error. */
         return anv_device_set_lost(device, "drm_syncobj_wait failed: %m");
      }
   } else {
      return VK_SUCCESS;
   }
}

static VkResult
anv_wait_for_bo_fences(struct anv_device *device,
                       uint32_t fenceCount,
                       const VkFence *pFences,
                       bool waitAll,
                       uint64_t abs_timeout_ns)
{
   VkResult result = VK_SUCCESS;
   uint32_t pending_fences = fenceCount;
   while (pending_fences) {
      pending_fences = 0;
      bool signaled_fences = false;
      for (uint32_t i = 0; i < fenceCount; i++) {
         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);

         /* This function assumes that all fences are BO fences and that they
          * have no temporary state.  Since BO fences will never be exported,
          * this should be a safe assumption.
          */
         assert(fence->permanent.type == ANV_FENCE_TYPE_BO);
         assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
         struct anv_fence_impl *impl = &fence->permanent;

         switch (impl->bo.state) {
         case ANV_BO_FENCE_STATE_RESET:
            /* This fence hasn't been submitted yet, we'll catch it the next
             * time around.  Yes, this may mean we dead-loop but, short of
             * lots of locking and a condition variable, there's not much
             * that we can do about that.
             */
            pending_fences++;
            continue;

         case ANV_BO_FENCE_STATE_SIGNALED:
            /* This fence is not pending.  If waitAll isn't set, we can
             * return early.  Otherwise, we have to keep going.
             */
            if (!waitAll) {
               result = VK_SUCCESS;
               goto done;
            }
            continue;

         case ANV_BO_FENCE_STATE_SUBMITTED:
            /* These are the fences we really care about.  Go ahead and wait
             * on it until we hit a timeout.
             */
            result = anv_device_wait(device, impl->bo.bo,
                                     anv_get_relative_timeout(abs_timeout_ns));
            switch (result) {
            case VK_SUCCESS:
               impl->bo.state = ANV_BO_FENCE_STATE_SIGNALED;
               signaled_fences = true;
               if (!waitAll)
                  goto done;
               break;

            case VK_TIMEOUT:
               goto done;

            default:
               return result;
            }
         }
      }

      if (pending_fences && !signaled_fences) {
         /* If we've hit this then someone decided to vkWaitForFences before
          * they've actually submitted any of them to a queue.  This is a
          * fairly pessimal case, so it's ok to lock here and use a standard
          * pthreads condition variable.
          */
         pthread_mutex_lock(&device->mutex);

         /* It's possible that some of the fences have changed state since
          * the last time we checked.  Now that we have the lock, check for
          * pending fences again and don't wait if it's changed.
          */
         uint32_t now_pending_fences = 0;
         for (uint32_t i = 0; i < fenceCount; i++) {
            ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
            if (fence->permanent.bo.state == ANV_BO_FENCE_STATE_RESET)
               now_pending_fences++;
         }
         assert(now_pending_fences <= pending_fences);

         if (now_pending_fences == pending_fences) {
            struct timespec abstime = {
               .tv_sec = abs_timeout_ns / NSEC_PER_SEC,
               .tv_nsec = abs_timeout_ns % NSEC_PER_SEC,
            };

            ASSERTED int ret;
            ret = pthread_cond_timedwait(&device->queue_submit,
                                         &device->mutex, &abstime);
            assert(ret != EINVAL);
            if (anv_gettime_ns() >= abs_timeout_ns) {
               pthread_mutex_unlock(&device->mutex);
               result = VK_TIMEOUT;
               goto done;
            }
         }

         pthread_mutex_unlock(&device->mutex);
      }
   }

 done:
   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   return result;
}

static VkResult
anv_wait_for_wsi_fence(struct anv_device *device,
                       const VkFence _fence,
                       uint64_t abs_timeout)
{
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   struct anv_fence_impl *impl = &fence->permanent;

   return impl->fence_wsi->wait(impl->fence_wsi, abs_timeout);
}

static VkResult
anv_wait_for_fences(struct anv_device *device,
                    uint32_t fenceCount,
                    const VkFence *pFences,
                    bool waitAll,
                    uint64_t abs_timeout)
{
   VkResult result = VK_SUCCESS;

   if (fenceCount <= 1 || waitAll) {
      for (uint32_t i = 0; i < fenceCount; i++) {
         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
         switch (fence->permanent.type) {
         case ANV_FENCE_TYPE_BO:
            result = anv_wait_for_bo_fences(device, 1, &pFences[i],
                                            true, abs_timeout);
            break;
         case ANV_FENCE_TYPE_SYNCOBJ:
            result = anv_wait_for_syncobj_fences(device, 1, &pFences[i],
                                                 true, abs_timeout);
            break;
         case ANV_FENCE_TYPE_WSI:
            result = anv_wait_for_wsi_fence(device, pFences[i], abs_timeout);
            break;
         case ANV_FENCE_TYPE_NONE:
            result = VK_SUCCESS;
            break;
         }
         if (result != VK_SUCCESS)
            return result;
      }
   } else {
      do {
         for (uint32_t i = 0; i < fenceCount; i++) {
            if (anv_wait_for_fences(device, 1, &pFences[i], true, 0) == VK_SUCCESS)
               return VK_SUCCESS;
         }
      } while (anv_gettime_ns() < abs_timeout);
      result = VK_TIMEOUT;
   }
   return result;
}
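
/* Note the "wait any" path above: with a mix of fence types there is no
 * single kernel primitive to wait on, so the driver polls each fence with a
 * zero timeout (abs_timeout == 0) until one of them signals or the absolute
 * deadline passes.
 */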

static bool anv_all_fences_syncobj(uint32_t fenceCount, const VkFence *pFences)
{
   for (uint32_t i = 0; i < fenceCount; ++i) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      if (fence->permanent.type != ANV_FENCE_TYPE_SYNCOBJ)
         return false;
   }
   return true;
}

static bool anv_all_fences_bo(uint32_t fenceCount, const VkFence *pFences)
{
   for (uint32_t i = 0; i < fenceCount; ++i) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      if (fence->permanent.type != ANV_FENCE_TYPE_BO)
         return false;
   }
   return true;
}

VkResult anv_WaitForFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences,
    VkBool32                                    waitAll,
    uint64_t                                    timeout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   uint64_t abs_timeout = anv_get_absolute_timeout(timeout);
   if (anv_all_fences_syncobj(fenceCount, pFences)) {
      return anv_wait_for_syncobj_fences(device, fenceCount, pFences,
                                         waitAll, abs_timeout);
   } else if (anv_all_fences_bo(fenceCount, pFences)) {
      return anv_wait_for_bo_fences(device, fenceCount, pFences,
                                    waitAll, abs_timeout);
   } else {
      return anv_wait_for_fences(device, fenceCount, pFences,
                                 waitAll, abs_timeout);
   }
}

void anv_GetPhysicalDeviceExternalFenceProperties(
    VkPhysicalDevice                            physicalDevice,
    const VkPhysicalDeviceExternalFenceInfo*    pExternalFenceInfo,
    VkExternalFenceProperties*                  pExternalFenceProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   switch (pExternalFenceInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
      if (device->has_syncobj_wait) {
         pExternalFenceProperties->exportFromImportedHandleTypes =
            VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT |
            VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalFenceProperties->compatibleHandleTypes =
            VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT |
            VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalFenceProperties->externalFenceFeatures =
            VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT;
         return;
      }
      break;

   default:
      break;
   }

   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

VkResult anv_ImportFenceFdKHR(
    VkDevice                                    _device,
    const VkImportFenceFdInfoKHR*               pImportFenceFdInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, pImportFenceFdInfo->fence);
   int fd = pImportFenceFdInfo->fd;

   assert(pImportFenceFdInfo->sType ==
          VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR);

   struct anv_fence_impl new_impl = {
      .type = ANV_FENCE_TYPE_NONE,
   };

   switch (pImportFenceFdInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
      new_impl.type = ANV_FENCE_TYPE_SYNCOBJ;

      new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
      if (!new_impl.syncobj)
         return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);

      break;

   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
      /* Sync files are a bit tricky.  Because we want to continue using the
       * syncobj implementation of WaitForFences, we don't use the sync file
       * directly but instead import it into a syncobj.
       */
      new_impl.type = ANV_FENCE_TYPE_SYNCOBJ;

      new_impl.syncobj = anv_gem_syncobj_create(device, 0);
      if (!new_impl.syncobj)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      if (anv_gem_syncobj_import_sync_file(device, new_impl.syncobj, fd)) {
         anv_gem_syncobj_destroy(device, new_impl.syncobj);
         return vk_errorf(device->instance, NULL,
                          VK_ERROR_INVALID_EXTERNAL_HANDLE,
                          "syncobj sync file import failed: %m");
      }
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Importing a fence payload from a file descriptor transfers
    *    ownership of the file descriptor from the application to the
    *    Vulkan implementation.  The application must not perform any
    *    operations on the file descriptor after a successful import."
    *
    * If the import fails, we leave the file descriptor open.
    */
   close(fd);

   if (pImportFenceFdInfo->flags & VK_FENCE_IMPORT_TEMPORARY_BIT) {
      anv_fence_impl_cleanup(device, &fence->temporary);
      fence->temporary = new_impl;
   } else {
      anv_fence_impl_cleanup(device, &fence->permanent);
      fence->permanent = new_impl;
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceFdKHR(
    VkDevice                                    _device,
    const VkFenceGetFdInfoKHR*                  pGetFdInfo,
    int*                                        pFd)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, pGetFdInfo->fence);

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR);

   struct anv_fence_impl *impl =
      fence->temporary.type != ANV_FENCE_TYPE_NONE ?
      &fence->temporary : &fence->permanent;

   assert(impl->type == ANV_FENCE_TYPE_SYNCOBJ);
   switch (pGetFdInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT: {
      int fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = fd;
      break;
   }

   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT: {
      int fd = anv_gem_syncobj_export_sync_file(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = fd;
      break;
   }

   default:
      unreachable("Invalid fence export handle type");
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Export operations have the same transference as the specified
    *    handle type’s import operations.  [...]  If the fence was using a
    *    temporarily imported payload, the fence’s prior permanent payload
    *    will be restored."
    */
   if (impl == &fence->temporary)
      anv_fence_impl_cleanup(device, impl);

   return VK_SUCCESS;
}

// Queue semaphore functions

static VkSemaphoreTypeKHR
get_semaphore_type(const void *pNext, uint64_t *initial_value)
{
   const VkSemaphoreTypeCreateInfoKHR *type_info =
      vk_find_struct_const(pNext, SEMAPHORE_TYPE_CREATE_INFO_KHR);

   if (!type_info)
      return VK_SEMAPHORE_TYPE_BINARY_KHR;

   if (initial_value)
      *initial_value = type_info->initialValue;
   return type_info->semaphoreType;
}
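
/* This parses the application-side struct chain from
 * VK_KHR_timeline_semaphore.  A typical request for a timeline semaphore
 * with initial value 1 looks roughly like (illustrative sketch):
 *
 *    VkSemaphoreTypeCreateInfoKHR type_info = {
 *       .sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR,
 *       .semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE_KHR,
 *       .initialValue = 1,
 *    };
 *    VkSemaphoreCreateInfo create_info = {
 *       .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
 *       .pNext = &type_info,
 *    };
 *    vkCreateSemaphore(device, &create_info, NULL, &semaphore);
 *
 * Without the chained type_info, get_semaphore_type() defaults to a binary
 * semaphore.
 */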
1656
1657 static VkResult
1658 binary_semaphore_create(struct anv_device *device,
1659 struct anv_semaphore_impl *impl,
1660 bool exportable)
1661 {
1662 if (device->instance->physicalDevice.has_syncobj) {
1663 impl->type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
1664 impl->syncobj = anv_gem_syncobj_create(device, 0);
1665 if (!impl->syncobj)
1666 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1667 return VK_SUCCESS;
1668 } else {
1669 impl->type = ANV_SEMAPHORE_TYPE_BO;
1670 VkResult result =
1671 anv_device_alloc_bo(device, 4096,
1672 ANV_BO_ALLOC_EXTERNAL |
1673 ANV_BO_ALLOC_IMPLICIT_SYNC,
1674 0 /* explicit_address */,
1675 &impl->bo);
1676 /* If we're going to use this as a fence, we need to *not* have the
1677 * EXEC_OBJECT_ASYNC bit set.
1678 */
1679 assert(!(impl->bo->flags & EXEC_OBJECT_ASYNC));
1680 return result;
1681 }
1682 }
1683
1684 static VkResult
1685 timeline_semaphore_create(struct anv_device *device,
1686 struct anv_semaphore_impl *impl,
1687 uint64_t initial_value)
1688 {
1689 impl->type = ANV_SEMAPHORE_TYPE_TIMELINE;
1690 anv_timeline_init(device, &impl->timeline, initial_value);
1691 return VK_SUCCESS;
1692 }
1693
1694 VkResult anv_CreateSemaphore(
1695 VkDevice _device,
1696 const VkSemaphoreCreateInfo* pCreateInfo,
1697 const VkAllocationCallbacks* pAllocator,
1698 VkSemaphore* pSemaphore)
1699 {
1700 ANV_FROM_HANDLE(anv_device, device, _device);
1701 struct anv_semaphore *semaphore;
1702
1703 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO);
1704
1705 uint64_t timeline_value = 0;
1706 VkSemaphoreTypeKHR sem_type = get_semaphore_type(pCreateInfo->pNext, &timeline_value);
1707
1708 semaphore = vk_alloc(&device->alloc, sizeof(*semaphore), 8,
1709 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1710 if (semaphore == NULL)
1711 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1712
1713 p_atomic_set(&semaphore->refcount, 1);
1714
1715 const VkExportSemaphoreCreateInfo *export =
1716 vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO);
1717 VkExternalSemaphoreHandleTypeFlags handleTypes =
1718 export ? export->handleTypes : 0;
1719 VkResult result;
1720
1721 if (handleTypes == 0) {
1722 if (sem_type == VK_SEMAPHORE_TYPE_BINARY_KHR)
1723 result = binary_semaphore_create(device, &semaphore->permanent, false);
1724 else
1725 result = timeline_semaphore_create(device, &semaphore->permanent, timeline_value);
1726 if (result != VK_SUCCESS) {
1727 vk_free2(&device->alloc, pAllocator, semaphore);
1728 return result;
1729 }
1730 } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) {
1731 assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT);
1732 assert(sem_type == VK_SEMAPHORE_TYPE_BINARY_KHR);
1733 result = binary_semaphore_create(device, &semaphore->permanent, true);
1734 if (result != VK_SUCCESS) {
1735 vk_free2(&device->alloc, pAllocator, semaphore);
1736 return result;
1737 }
1738 } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
1739 assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT);
1740 assert(sem_type == VK_SEMAPHORE_TYPE_BINARY_KHR);
1741 if (device->instance->physicalDevice.has_syncobj) {
1742 semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
1743 semaphore->permanent.syncobj = anv_gem_syncobj_create(device, 0);
1744 if (!semaphore->permanent.syncobj) {
1745 vk_free2(&device->alloc, pAllocator, semaphore);
1746 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1747 }
1748 } else {
1749 semaphore->permanent.type = ANV_SEMAPHORE_TYPE_SYNC_FILE;
1750 semaphore->permanent.fd = -1;
1751 }
1752 } else {
1753 assert(!"Unknown handle type");
1754 vk_free2(&device->alloc, pAllocator, semaphore);
1755 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
1756 }
1757
1758 semaphore->temporary.type = ANV_SEMAPHORE_TYPE_NONE;
1759
1760 *pSemaphore = anv_semaphore_to_handle(semaphore);
1761
1762 return VK_SUCCESS;
1763 }
1764
1765 static void
1766 anv_semaphore_impl_cleanup(struct anv_device *device,
1767 struct anv_semaphore_impl *impl)
1768 {
1769 switch (impl->type) {
1770 case ANV_SEMAPHORE_TYPE_NONE:
1771 case ANV_SEMAPHORE_TYPE_DUMMY:
1772 /* Dummy. Nothing to do */
1773 break;
1774
1775 case ANV_SEMAPHORE_TYPE_BO:
1776 anv_device_release_bo(device, impl->bo);
1777 break;
1778
1779 case ANV_SEMAPHORE_TYPE_SYNC_FILE:
1780 close(impl->fd);
1781 break;
1782
1783 case ANV_SEMAPHORE_TYPE_TIMELINE:
1784 anv_timeline_finish(device, &impl->timeline);
1785 break;
1786
1787 case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
1788 anv_gem_syncobj_destroy(device, impl->syncobj);
1789 break;
1790
1791 default:
1792 unreachable("Invalid semaphore type");
1793 }
1794
1795 impl->type = ANV_SEMAPHORE_TYPE_NONE;
1796 }
1797
1798 void
1799 anv_semaphore_reset_temporary(struct anv_device *device,
1800 struct anv_semaphore *semaphore)
1801 {
1802 if (semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE)
1803 return;
1804
1805 anv_semaphore_impl_cleanup(device, &semaphore->temporary);
1806 }
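
/* Every entry point below resolves the semaphore's active payload with the
 * same idiom: a temporary payload, when present, takes precedence over the
 * permanent one.  A hypothetical helper (illustrative only, compiled out)
 * equivalent to what those functions inline by hand:
 */
#if 0
static struct anv_semaphore_impl *
example_semaphore_active_impl(struct anv_semaphore *semaphore)
{
   return semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
          &semaphore->temporary : &semaphore->permanent;
}
#endif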
1807
1808 static struct anv_semaphore *
1809 anv_semaphore_ref(struct anv_semaphore *semaphore)
1810 {
1811 assert(semaphore->refcount);
1812 p_atomic_inc(&semaphore->refcount);
1813 return semaphore;
1814 }
1815
1816 static void
1817 anv_semaphore_unref(struct anv_device *device, struct anv_semaphore *semaphore)
1818 {
1819 if (!p_atomic_dec_zero(&semaphore->refcount))
1820 return;
1821
1822 anv_semaphore_impl_cleanup(device, &semaphore->temporary);
1823 anv_semaphore_impl_cleanup(device, &semaphore->permanent);
1824 vk_free(&device->alloc, semaphore);
1825 }
1826
1827 void anv_DestroySemaphore(
1828 VkDevice _device,
1829 VkSemaphore _semaphore,
1830 const VkAllocationCallbacks* pAllocator)
1831 {
1832 ANV_FROM_HANDLE(anv_device, device, _device);
1833 ANV_FROM_HANDLE(anv_semaphore, semaphore, _semaphore);
1834
1835 if (semaphore == NULL)
1836 return;
1837
1838 anv_semaphore_unref(device, semaphore);
1839 }
1840
1841 void anv_GetPhysicalDeviceExternalSemaphoreProperties(
1842 VkPhysicalDevice physicalDevice,
1843 const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
1844 VkExternalSemaphoreProperties* pExternalSemaphoreProperties)
1845 {
1846 ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
1847
1848 VkSemaphoreTypeKHR sem_type =
1849 get_semaphore_type(pExternalSemaphoreInfo->pNext, NULL);
1850
1851 switch (pExternalSemaphoreInfo->handleType) {
1852 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
1853 /* Timeline semaphores are not exportable. */
1854 if (sem_type == VK_SEMAPHORE_TYPE_TIMELINE_KHR)
1855 break;
1856 pExternalSemaphoreProperties->exportFromImportedHandleTypes =
1857 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
1858 pExternalSemaphoreProperties->compatibleHandleTypes =
1859 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
1860 pExternalSemaphoreProperties->externalSemaphoreFeatures =
1861 VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
1862 VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
1863 return;
1864
1865 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
1866 if (sem_type == VK_SEMAPHORE_TYPE_TIMELINE_KHR)
1867 break;
1868 if (!device->has_exec_fence)
1869 break;
1870 pExternalSemaphoreProperties->exportFromImportedHandleTypes =
1871 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
1872 pExternalSemaphoreProperties->compatibleHandleTypes =
1873 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
1874 pExternalSemaphoreProperties->externalSemaphoreFeatures =
1875 VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
1876 VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
1877 return;
1878
1879 default:
1880 break;
1881 }
1882
1883 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
1884 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
1885 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
1886 }
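
/* Compiled-out, application-side sketch (hypothetical helper name) of how
 * the properties reported above would be probed before relying on sync-FD
 * export:
 */
#if 0
static bool
example_supports_sync_fd_export(VkPhysicalDevice physical_device)
{
   const VkPhysicalDeviceExternalSemaphoreInfo info = {
      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO,
      .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
   };
   VkExternalSemaphoreProperties props = {
      .sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES,
   };
   vkGetPhysicalDeviceExternalSemaphoreProperties(physical_device,
                                                  &info, &props);
   return props.externalSemaphoreFeatures &
          VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT;
}
#endif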
1887
1888 VkResult anv_ImportSemaphoreFdKHR(
1889 VkDevice _device,
1890 const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo)
1891 {
1892 ANV_FROM_HANDLE(anv_device, device, _device);
1893 ANV_FROM_HANDLE(anv_semaphore, semaphore, pImportSemaphoreFdInfo->semaphore);
1894 int fd = pImportSemaphoreFdInfo->fd;
1895
1896 struct anv_semaphore_impl new_impl = {
1897 .type = ANV_SEMAPHORE_TYPE_NONE,
1898 };
1899
1900 switch (pImportSemaphoreFdInfo->handleType) {
1901 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
1902 if (device->instance->physicalDevice.has_syncobj) {
1903 new_impl.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
1904
1905 new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
1906 if (!new_impl.syncobj)
1907 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
1908 } else {
1909 new_impl.type = ANV_SEMAPHORE_TYPE_BO;
1910
1911 VkResult result = anv_device_import_bo(device, fd,
1912 ANV_BO_ALLOC_EXTERNAL |
1913 ANV_BO_ALLOC_IMPLICIT_SYNC,
1914 0 /* client_address */,
1915 &new_impl.bo);
1916 if (result != VK_SUCCESS)
1917 return result;
1918
1919 if (new_impl.bo->size < 4096) {
1920 anv_device_release_bo(device, new_impl.bo);
1921 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
1922 }
1923
1924 /* If we're going to use this as a fence, we need to *not* have the
1925 * EXEC_OBJECT_ASYNC bit set.
1926 */
1927 assert(!(new_impl.bo->flags & EXEC_OBJECT_ASYNC));
1928 }
1929
1930 /* From the Vulkan spec:
1931 *
1932 * "Importing semaphore state from a file descriptor transfers
1933 * ownership of the file descriptor from the application to the
1934 * Vulkan implementation. The application must not perform any
1935 * operations on the file descriptor after a successful import."
1936 *
1937 * If the import fails, we leave the file descriptor open.
1938 */
1939 close(fd);
1940 break;
1941
1942 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
1943 if (device->instance->physicalDevice.has_syncobj) {
1944 new_impl = (struct anv_semaphore_impl) {
1945 .type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ,
1946 .syncobj = anv_gem_syncobj_create(device, 0),
1947 };
1948 if (!new_impl.syncobj)
1949 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1950 if (anv_gem_syncobj_import_sync_file(device, new_impl.syncobj, fd)) {
1951 anv_gem_syncobj_destroy(device, new_impl.syncobj);
1952 return vk_errorf(device->instance, NULL,
1953 VK_ERROR_INVALID_EXTERNAL_HANDLE,
1954 "syncobj sync file import failed: %m");
1955 }
1956 /* Ownership of the FD is transferred to Anv. Since we don't need it
1957 * anymore because the associated fence has been put into a syncobj,
1958 * we must close the FD.
1959 */
1960 close(fd);
1961 } else {
1962 new_impl = (struct anv_semaphore_impl) {
1963 .type = ANV_SEMAPHORE_TYPE_SYNC_FILE,
1964 .fd = fd,
1965 };
1966 }
1967 break;
1968
1969 default:
1970 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
1971 }
1972
1973 if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
1974 anv_semaphore_impl_cleanup(device, &semaphore->temporary);
1975 semaphore->temporary = new_impl;
1976 } else {
1977 anv_semaphore_impl_cleanup(device, &semaphore->permanent);
1978 semaphore->permanent = new_impl;
1979 }
1980
1981 return VK_SUCCESS;
1982 }
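
/* Compiled-out, application-side sketch (hypothetical helper name) of a
 * temporary sync-FD import.  Because a successful import transfers
 * ownership of the FD to the implementation (see the spec quote above),
 * an application that still needs the FD afterwards should import a dup()
 * of it instead.
 */
#if 0
static VkResult
example_import_sync_fd(VkDevice device, VkSemaphore semaphore, int sync_fd)
{
   const VkImportSemaphoreFdInfoKHR import_info = {
      .sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR,
      .semaphore = semaphore,
      .flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT,
      .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
      .fd = sync_fd,
   };
   /* On success this replaces semaphore->temporary; the next wait
    * consumes it and the permanent payload is restored.
    */
   return vkImportSemaphoreFdKHR(device, &import_info);
}
#endif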
1983
1984 VkResult anv_GetSemaphoreFdKHR(
1985 VkDevice _device,
1986 const VkSemaphoreGetFdInfoKHR* pGetFdInfo,
1987 int* pFd)
1988 {
1989 ANV_FROM_HANDLE(anv_device, device, _device);
1990 ANV_FROM_HANDLE(anv_semaphore, semaphore, pGetFdInfo->semaphore);
1991 VkResult result;
1992 int fd;
1993
1994 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR);
1995
1996 struct anv_semaphore_impl *impl =
1997 semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
1998 &semaphore->temporary : &semaphore->permanent;
1999
2000 switch (impl->type) {
2001 case ANV_SEMAPHORE_TYPE_BO:
2002 result = anv_device_export_bo(device, impl->bo, pFd);
2003 if (result != VK_SUCCESS)
2004 return result;
2005 break;
2006
2007 case ANV_SEMAPHORE_TYPE_SYNC_FILE: {
2008 /* There's a potential race here with vkQueueSubmit if the application
2009 * tries to export the semaphore's FD while the queue submission is
2010 * still in flight: all of the submission's dependencies may resolve
2011 * via timeline semaphore waits before the execbuf completes, at which
2012 * point we process the resulting out-fence. To work around this, take
2013 * the device mutex around grabbing the FD.
2014 */
2015 pthread_mutex_lock(&device->mutex);
2016
2017 /* From the Vulkan 1.0.53 spec:
2018 *
2019 * "...exporting a semaphore payload to a handle with copy
2020 * transference has the same side effects on the source
2021 * semaphore’s payload as executing a semaphore wait operation."
2022 *
2023 * In other words, it may still be a SYNC_FD semaphore, but it's now
2024 * considered to have been waited on and no longer has a sync file
2025 * attached.
2026 */
2027 int fd = impl->fd;
2028 impl->fd = -1;
2029
2030 pthread_mutex_unlock(&device->mutex);
2031
2032 /* There are two reasons why this could happen:
2033 *
2034 * 1) The user is trying to export without submitting something that
2035 * signals the semaphore. If this is the case, it's their bug so
2036 * what we return here doesn't matter.
2037 *
2038 * 2) The kernel didn't give us a file descriptor. The most likely
2039 * reason for this is running out of file descriptors.
2040 */
2041 if (fd < 0)
2042 return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
2043
2044 *pFd = fd;
2045 return VK_SUCCESS;
2046 }
2047
2048 case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
2049 if (pGetFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
2050 fd = anv_gem_syncobj_export_sync_file(device, impl->syncobj);
2051 } else {
2052 assert(pGetFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT);
2053 fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
2054 }
2055 if (fd < 0)
2056 return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
2057 *pFd = fd;
2058 break;
2059
2060 default:
2061 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
2062 }
2063
2064 /* From the Vulkan 1.0.53 spec:
2065 *
2066 * "Export operations have the same transference as the specified handle
2067 * type’s import operations. [...] If the semaphore was using a
2068 * temporarily imported payload, the semaphore’s prior permanent payload
2069 * will be restored."
2070 */
2071 if (impl == &semaphore->temporary)
2072 anv_semaphore_impl_cleanup(device, impl);
2073
2074 return VK_SUCCESS;
2075 }
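
/* Compiled-out, application-side sketch (hypothetical helper name) of the
 * matching export call.  Per the transference rules quoted above, a
 * sync-FD export acts like a wait on the source payload; the caller owns
 * the returned FD and must eventually close() it.
 */
#if 0
static int
example_export_sync_fd(VkDevice device, VkSemaphore semaphore)
{
   const VkSemaphoreGetFdInfoKHR get_info = {
      .sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR,
      .semaphore = semaphore,
      .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
   };
   int fd = -1;
   if (vkGetSemaphoreFdKHR(device, &get_info, &fd) != VK_SUCCESS)
      return -1;
   return fd;
}
#endif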
2076
2077 VkResult anv_GetSemaphoreCounterValueKHR(
2078 VkDevice _device,
2079 VkSemaphore _semaphore,
2080 uint64_t* pValue)
2081 {
2082 ANV_FROM_HANDLE(anv_device, device, _device);
2083 ANV_FROM_HANDLE(anv_semaphore, semaphore, _semaphore);
2084
2085 struct anv_semaphore_impl *impl =
2086 semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
2087 &semaphore->temporary : &semaphore->permanent;
2088
2089 switch (impl->type) {
2090 case ANV_SEMAPHORE_TYPE_TIMELINE: {
2091 pthread_mutex_lock(&device->mutex);
2092 *pValue = impl->timeline.highest_past;
2093 pthread_mutex_unlock(&device->mutex);
2094 return VK_SUCCESS;
2095 }
2096
2097 default:
2098 unreachable("Invalid semaphore type");
2099 }
2100 }
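
/* Compiled-out, application-side sketch (hypothetical helper name) of
 * polling a timeline's counter.  The value read above is highest_past,
 * i.e. only time points whose BOs have actually completed are reported.
 */
#if 0
static bool
example_timeline_reached(VkDevice device, VkSemaphore semaphore,
                         uint64_t target)
{
   uint64_t value = 0;
   if (vkGetSemaphoreCounterValueKHR(device, semaphore, &value) != VK_SUCCESS)
      return false;
   return value >= target;
}
#endif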
2101
2102 static VkResult
2103 anv_timeline_wait_locked(struct anv_device *device,
2104 struct anv_timeline *timeline,
2105 uint64_t serial, uint64_t abs_timeout_ns)
2106 {
2107 /* Wait on the queue_submit condition variable until the timeline has a
2108 * time point pending that's at least as high as serial.
2109 */
2110 while (timeline->highest_pending < serial) {
2111 struct timespec abstime = {
2112 .tv_sec = abs_timeout_ns / NSEC_PER_SEC,
2113 .tv_nsec = abs_timeout_ns % NSEC_PER_SEC,
2114 };
2115
2116 ASSERTED int ret = pthread_cond_timedwait(&device->queue_submit,
2117 &device->mutex, &abstime);
2118 assert(ret != EINVAL);
2119 if (anv_gettime_ns() >= abs_timeout_ns &&
2120 timeline->highest_pending < serial)
2121 return VK_TIMEOUT;
2122 }
2123
2124 while (1) {
2125 VkResult result = anv_timeline_gc_locked(device, timeline);
2126 if (result != VK_SUCCESS)
2127 return result;
2128
2129 if (timeline->highest_past >= serial)
2130 return VK_SUCCESS;
2131
2132 /* If we got here, our earliest time point has a busy BO */
2133 struct anv_timeline_point *point =
2134 list_first_entry(&timeline->points,
2135 struct anv_timeline_point, link);
2136
2137 /* Drop the lock while we wait. */
2138 point->waiting++;
2139 pthread_mutex_unlock(&device->mutex);
2140
2141 result = anv_device_wait(device, point->bo,
2142 anv_get_relative_timeout(abs_timeout_ns));
2143
2144 /* Pick the mutex back up */
2145 pthread_mutex_lock(&device->mutex);
2146 point->waiting--;
2147
2148 /* This covers both VK_TIMEOUT and VK_ERROR_DEVICE_LOST */
2149 if (result != VK_SUCCESS)
2150 return result;
2151 }
2152 }
2153
2154 static VkResult
2155 anv_timelines_wait(struct anv_device *device,
2156 struct anv_timeline **timelines,
2157 const uint64_t *serials,
2158 uint32_t n_timelines,
2159 bool wait_all,
2160 uint64_t abs_timeout_ns)
2161 {
2162 if (!wait_all && n_timelines > 1) {
2163 while (1) {
2164 VkResult result;
2165 pthread_mutex_lock(&device->mutex);
2166 for (uint32_t i = 0; i < n_timelines; i++) {
2167 result =
2168 anv_timeline_wait_locked(device, timelines[i], serials[i], 0);
2169 if (result != VK_TIMEOUT)
2170 break;
2171 }
2172
2173 if (result != VK_TIMEOUT ||
2174 anv_gettime_ns() >= abs_timeout_ns) {
2175 pthread_mutex_unlock(&device->mutex);
2176 return result;
2177 }
2178
2179 /* If none of them are ready, do a short wait so we don't completely
2180 * spin while holding the lock. The 10us cap is completely arbitrary.
2181 */
2182 uint64_t abs_short_wait_ns =
2183 anv_get_absolute_timeout(
2184 MIN2((abs_timeout_ns - anv_gettime_ns()) / 10, 10 * 1000));
2185 struct timespec abstime = {
2186 .tv_sec = abs_short_wait_ns / NSEC_PER_SEC,
2187 .tv_nsec = abs_short_wait_ns % NSEC_PER_SEC,
2188 };
2189 ASSERTED int ret;
2190 ret = pthread_cond_timedwait(&device->queue_submit,
2191 &device->mutex, &abstime);
2192 assert(ret != EINVAL);
2193 }
2194 } else {
2195 VkResult result = VK_SUCCESS;
2196 pthread_mutex_lock(&device->mutex);
2197 for (uint32_t i = 0; i < n_timelines; i++) {
2198 result =
2199 anv_timeline_wait_locked(device, timelines[i],
2200 serials[i], abs_timeout_ns);
2201 if (result != VK_SUCCESS)
2202 break;
2203 }
2204 pthread_mutex_unlock(&device->mutex);
2205 return result;
2206 }
2207 }
2208
2209 VkResult anv_WaitSemaphoresKHR(
2210 VkDevice _device,
2211 const VkSemaphoreWaitInfoKHR* pWaitInfo,
2212 uint64_t timeout)
2213 {
2214 ANV_FROM_HANDLE(anv_device, device, _device);
2215
2216 struct anv_timeline **timelines =
2217 vk_alloc(&device->alloc,
2218 pWaitInfo->semaphoreCount * sizeof(*timelines),
2219 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
2220 if (!timelines)
2221 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2222
2223 uint64_t *values = vk_alloc(&device->alloc,
2224 pWaitInfo->semaphoreCount * sizeof(*values),
2225 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
2226 if (!values) {
2227 vk_free(&device->alloc, timelines);
2228 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2229 }
2230
2231 uint32_t handle_count = 0;
2232 for (uint32_t i = 0; i < pWaitInfo->semaphoreCount; i++) {
2233 ANV_FROM_HANDLE(anv_semaphore, semaphore, pWaitInfo->pSemaphores[i]);
2234 struct anv_semaphore_impl *impl =
2235 semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
2236 &semaphore->temporary : &semaphore->permanent;
2237
2238 assert(impl->type == ANV_SEMAPHORE_TYPE_TIMELINE);
2239
2240 if (pWaitInfo->pValues[i] == 0)
2241 continue;
2242
2243 timelines[handle_count] = &impl->timeline;
2244 values[handle_count] = pWaitInfo->pValues[i];
2245 handle_count++;
2246 }
2247
2248 VkResult result = VK_SUCCESS;
2249 if (handle_count > 0) {
2250 result = anv_timelines_wait(device, timelines, values, handle_count,
2251 !(pWaitInfo->flags & VK_SEMAPHORE_WAIT_ANY_BIT_KHR),
2252 anv_get_absolute_timeout(timeout));
2253 }
2254
2255 vk_free(&device->alloc, timelines);
2256 vk_free(&device->alloc, values);
2257
2258 return result;
2259 }
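
/* Compiled-out, application-side sketch (hypothetical helper name) of a
 * wait-any on two timelines, which exercises the short-poll path in
 * anv_timelines_wait() above.  Note the Vulkan timeout parameter is a
 * relative nanosecond count; it's converted to an absolute deadline
 * internally.
 */
#if 0
static VkResult
example_wait_any(VkDevice device, VkSemaphore sem_a, VkSemaphore sem_b,
                 uint64_t value_a, uint64_t value_b)
{
   const VkSemaphore sems[2] = { sem_a, sem_b };
   const uint64_t values[2] = { value_a, value_b };
   const VkSemaphoreWaitInfoKHR wait_info = {
      .sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO_KHR,
      .flags = VK_SEMAPHORE_WAIT_ANY_BIT_KHR,
      .semaphoreCount = 2,
      .pSemaphores = sems,
      .pValues = values,
   };
   return vkWaitSemaphoresKHR(device, &wait_info,
                              1000000000ull /* 1 second */);
}
#endif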
2260
2261 VkResult anv_SignalSemaphoreKHR(
2262 VkDevice _device,
2263 const VkSemaphoreSignalInfoKHR* pSignalInfo)
2264 {
2265 ANV_FROM_HANDLE(anv_device, device, _device);
2266 ANV_FROM_HANDLE(anv_semaphore, semaphore, pSignalInfo->semaphore);
2267
2268 struct anv_semaphore_impl *impl =
2269 semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
2270 &semaphore->temporary : &semaphore->permanent;
2271
2272 switch (impl->type) {
2273 case ANV_SEMAPHORE_TYPE_TIMELINE: {
2274 pthread_mutex_lock(&device->mutex);
2275
2276 VkResult result = anv_timeline_gc_locked(device, &impl->timeline);
2277
2278 assert(pSignalInfo->value > impl->timeline.highest_pending);
2279
2280 impl->timeline.highest_pending = impl->timeline.highest_past = pSignalInfo->value;
2281
2282 if (result == VK_SUCCESS)
2283 result = anv_device_submit_deferred_locked(device);
2284
2285 pthread_cond_broadcast(&device->queue_submit);
2286 pthread_mutex_unlock(&device->mutex);
2287 return result;
2288 }
2289
2290 default:
2291 unreachable("Invalid semaphore type");
2292 }
2293 }
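
/* Compiled-out, application-side sketch (hypothetical helper name) of a
 * host-side signal.  This takes the ANV_SEMAPHORE_TYPE_TIMELINE path
 * above and wakes any waiters blocked on the device's queue_submit
 * condition variable.
 */
#if 0
static VkResult
example_host_signal(VkDevice device, VkSemaphore semaphore, uint64_t value)
{
   const VkSemaphoreSignalInfoKHR signal_info = {
      .sType = VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO_KHR,
      .semaphore = semaphore,
      /* The spec requires this to be greater than the semaphore's current
       * value, matching the assert in anv_SignalSemaphoreKHR().
       */
      .value = value,
   };
   return vkSignalSemaphoreKHR(device, &signal_info);
}
#endif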