/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * This file implements VkQueue, VkFence, and VkSemaphore
 */

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

#include "anv_private.h"
#include "vk_util.h"

#include "genxml/gen7_pack.h"

uint64_t anv_gettime_ns(void)
{
   struct timespec current;
   clock_gettime(CLOCK_MONOTONIC, &current);
   return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
}

uint64_t anv_get_absolute_timeout(uint64_t timeout)
{
   if (timeout == 0)
      return 0;
   uint64_t current_time = anv_gettime_ns();
   uint64_t max_timeout = (uint64_t) INT64_MAX - current_time;

   timeout = MIN2(max_timeout, timeout);

   return (current_time + timeout);
}

static int64_t anv_get_relative_timeout(uint64_t abs_timeout)
{
   uint64_t now = anv_gettime_ns();

   /* We don't want negative timeouts.
    *
    * DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is
    * supposed to block indefinitely for timeouts < 0.  Unfortunately,
    * this was broken for a couple of kernel releases.  Since there's
    * no way to know whether or not the kernel we're using is one of
    * the broken ones, the best we can do is to clamp the timeout to
    * INT64_MAX.  This limits the maximum timeout from 584 years to
    * 292 years - likely not a big deal.
    */
   if (abs_timeout < now)
      return 0;

   uint64_t rel_timeout = abs_timeout - now;
   if (rel_timeout > (uint64_t) INT64_MAX)
      rel_timeout = INT64_MAX;

   return rel_timeout;
}
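
/* An illustrative sketch (not used by the driver) of how the two helpers
 * above compose: a user-supplied timeout is clamped once into an absolute
 * deadline, and each retry of a wait re-derives the remaining relative time
 * from that deadline.  The helper name is hypothetical.
 */
static UNUSED int64_t
example_remaining_timeout_ns(uint64_t user_timeout_ns)
{
   /* Clamp once up front so repeated waits share a single deadline. */
   uint64_t abs_deadline = anv_get_absolute_timeout(user_timeout_ns);

   /* ... imagine a wait ioctl returned early here (e.g. with ETIME) ... */

   /* Re-derive the remaining time; this is non-negative and clamped to
    * INT64_MAX, matching what DRM_IOCTL_I915_GEM_WAIT expects.
    */
   return anv_get_relative_timeout(abs_deadline);
}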

static struct anv_semaphore *anv_semaphore_ref(struct anv_semaphore *semaphore);
static void anv_semaphore_unref(struct anv_device *device, struct anv_semaphore *semaphore);
static void anv_semaphore_impl_cleanup(struct anv_device *device,
                                       struct anv_semaphore_impl *impl);

static void
anv_queue_submit_free(struct anv_device *device,
                      struct anv_queue_submit *submit)
{
   const VkAllocationCallbacks *alloc = submit->alloc;

   for (uint32_t i = 0; i < submit->temporary_semaphore_count; i++)
      anv_semaphore_impl_cleanup(device, &submit->temporary_semaphores[i]);
   for (uint32_t i = 0; i < submit->sync_fd_semaphore_count; i++)
      anv_semaphore_unref(device, submit->sync_fd_semaphores[i]);
   /* Execbuf does not consume the in_fence.  It's our job to close it. */
   if (submit->in_fence != -1)
      close(submit->in_fence);
   if (submit->out_fence != -1)
      close(submit->out_fence);
   vk_free(alloc, submit->fences);
   vk_free(alloc, submit->temporary_semaphores);
   vk_free(alloc, submit->wait_timelines);
   vk_free(alloc, submit->wait_timeline_values);
   vk_free(alloc, submit->signal_timelines);
   vk_free(alloc, submit->signal_timeline_values);
   vk_free(alloc, submit->fence_bos);
   vk_free(alloc, submit);
}

static bool
anv_queue_submit_ready_locked(struct anv_queue_submit *submit)
{
   for (uint32_t i = 0; i < submit->wait_timeline_count; i++) {
      if (submit->wait_timeline_values[i] > submit->wait_timelines[i]->highest_pending)
         return false;
   }

   return true;
}

static VkResult
anv_timeline_init(struct anv_device *device,
                  struct anv_timeline *timeline,
                  uint64_t initial_value)
{
   timeline->highest_past =
      timeline->highest_pending = initial_value;
   list_inithead(&timeline->points);
   list_inithead(&timeline->free_points);

   return VK_SUCCESS;
}

static void
anv_timeline_finish(struct anv_device *device,
                    struct anv_timeline *timeline)
{
   list_for_each_entry_safe(struct anv_timeline_point, point,
                            &timeline->free_points, link) {
      list_del(&point->link);
      anv_device_release_bo(device, point->bo);
      vk_free(&device->alloc, point);
   }
   list_for_each_entry_safe(struct anv_timeline_point, point,
                            &timeline->points, link) {
      list_del(&point->link);
      anv_device_release_bo(device, point->bo);
      vk_free(&device->alloc, point);
   }
}

static VkResult
anv_timeline_add_point_locked(struct anv_device *device,
                              struct anv_timeline *timeline,
                              uint64_t value,
                              struct anv_timeline_point **point)
{
   VkResult result = VK_SUCCESS;

   if (list_is_empty(&timeline->free_points)) {
      *point =
         vk_zalloc(&device->alloc, sizeof(**point),
                   8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (!(*point))
         result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      if (result == VK_SUCCESS) {
         result = anv_device_alloc_bo(device, 4096,
                                      ANV_BO_ALLOC_EXTERNAL |
                                      ANV_BO_ALLOC_IMPLICIT_SYNC,
                                      0 /* explicit_address */,
                                      &(*point)->bo);
         if (result != VK_SUCCESS)
            vk_free(&device->alloc, *point);
      }
   } else {
      *point = list_first_entry(&timeline->free_points,
                                struct anv_timeline_point, link);
      list_del(&(*point)->link);
   }

   if (result == VK_SUCCESS) {
      (*point)->serial = value;
      list_addtail(&(*point)->link, &timeline->points);
   }

   return result;
}

static VkResult
anv_timeline_gc_locked(struct anv_device *device,
                       struct anv_timeline *timeline)
{
   list_for_each_entry_safe(struct anv_timeline_point, point,
                            &timeline->points, link) {
      /* timeline->highest_pending is only incremented once submission has
       * happened.  If this point has a greater serial, it means the point
       * hasn't been submitted yet.
       */
      if (point->serial > timeline->highest_pending)
         return VK_SUCCESS;

      /* If someone is waiting on this time point, consider it busy and don't
       * try to recycle it.  There's a slim possibility that it's no longer
       * busy by the time we look at it but we would be recycling it out from
       * under a waiter and that can lead to weird races.
       *
       * We walk the list in-order so if this time point is still busy, so is
       * every following time point.
       */
      assert(point->waiting >= 0);
      if (point->waiting)
         return VK_SUCCESS;

      /* Garbage collect any signaled point. */
      VkResult result = anv_device_bo_busy(device, point->bo);
      if (result == VK_NOT_READY) {
         /* We walk the list in-order so if this time point is still busy,
          * so is every following time point.
          */
         return VK_SUCCESS;
      } else if (result != VK_SUCCESS) {
         return result;
      }

      assert(timeline->highest_past < point->serial);
      timeline->highest_past = point->serial;

      list_del(&point->link);
      list_add(&point->link, &timeline->free_points);
   }

   return VK_SUCCESS;
}

static VkResult anv_queue_submit_add_fence_bo(struct anv_queue_submit *submit,
                                              struct anv_bo *bo,
                                              bool signal);

static VkResult
anv_queue_submit_timeline_locked(struct anv_queue *queue,
                                 struct anv_queue_submit *submit)
{
   VkResult result;

   for (uint32_t i = 0; i < submit->wait_timeline_count; i++) {
      struct anv_timeline *timeline = submit->wait_timelines[i];
      uint64_t wait_value = submit->wait_timeline_values[i];

      if (timeline->highest_past >= wait_value)
         continue;

      list_for_each_entry(struct anv_timeline_point, point, &timeline->points, link) {
         if (point->serial < wait_value)
            continue;
         result = anv_queue_submit_add_fence_bo(submit, point->bo, false);
         if (result != VK_SUCCESS)
            return result;
         break;
      }
   }
   for (uint32_t i = 0; i < submit->signal_timeline_count; i++) {
      struct anv_timeline *timeline = submit->signal_timelines[i];
      uint64_t signal_value = submit->signal_timeline_values[i];
      struct anv_timeline_point *point;

      result = anv_timeline_add_point_locked(queue->device, timeline,
                                             signal_value, &point);
      if (result != VK_SUCCESS)
         return result;

      result = anv_queue_submit_add_fence_bo(submit, point->bo, true);
      if (result != VK_SUCCESS)
         return result;
   }

   result = anv_queue_execbuf_locked(queue, submit);

   if (result == VK_SUCCESS) {
      /* Update the pending values in the timeline objects. */
      for (uint32_t i = 0; i < submit->signal_timeline_count; i++) {
         struct anv_timeline *timeline = submit->signal_timelines[i];
         uint64_t signal_value = submit->signal_timeline_values[i];

         assert(signal_value > timeline->highest_pending);
         timeline->highest_pending = signal_value;
      }

      /* Update signaled semaphores backed by syncfd. */
      for (uint32_t i = 0; i < submit->sync_fd_semaphore_count; i++) {
         struct anv_semaphore *semaphore = submit->sync_fd_semaphores[i];
         /* Out fences can't have temporary state because that would imply
          * that we imported a sync file and are trying to signal it.
          */
         assert(semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE);
         struct anv_semaphore_impl *impl = &semaphore->permanent;

         assert(impl->type == ANV_SEMAPHORE_TYPE_SYNC_FILE);
         impl->fd = dup(submit->out_fence);
      }
   } else {
      /* Unblock any waiters by signaling the points; the application will
       * get a device lost error code.
       */
      for (uint32_t i = 0; i < submit->signal_timeline_count; i++) {
         struct anv_timeline *timeline = submit->signal_timelines[i];
         uint64_t signal_value = submit->signal_timeline_values[i];

         assert(signal_value > timeline->highest_pending);
         timeline->highest_past = timeline->highest_pending = signal_value;
      }
   }

   return result;
}

static VkResult
anv_queue_submit_deferred_locked(struct anv_queue *queue, uint32_t *advance)
{
   VkResult result = VK_SUCCESS;

   /* Go through all the queued submissions and submit them until we find one
    * that's waiting on a point that hasn't materialized yet.
    */
   list_for_each_entry_safe(struct anv_queue_submit, submit,
                            &queue->queued_submits, link) {
      if (!anv_queue_submit_ready_locked(submit))
         break;

      (*advance)++;
      list_del(&submit->link);

      result = anv_queue_submit_timeline_locked(queue, submit);

      anv_queue_submit_free(queue->device, submit);

      if (result != VK_SUCCESS)
         break;
   }

   return result;
}

static VkResult
anv_device_submit_deferred_locked(struct anv_device *device)
{
   uint32_t advance = 0;
   return anv_queue_submit_deferred_locked(&device->queue, &advance);
}

static VkResult
_anv_queue_submit(struct anv_queue *queue, struct anv_queue_submit **_submit)
{
   struct anv_queue_submit *submit = *_submit;

   /* Wait-before-signal behavior means we might keep the anv_queue_submit
    * object alive a bit longer, so transfer the ownership to the anv_queue.
    */
   *_submit = NULL;

   pthread_mutex_lock(&queue->device->mutex);
   list_addtail(&submit->link, &queue->queued_submits);
   VkResult result = anv_device_submit_deferred_locked(queue->device);
   pthread_mutex_unlock(&queue->device->mutex);
   return result;
}

VkResult
anv_queue_init(struct anv_device *device, struct anv_queue *queue)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->flags = 0;

   list_inithead(&queue->queued_submits);

   return VK_SUCCESS;
}

void
anv_queue_finish(struct anv_queue *queue)
{
}

static VkResult
anv_queue_submit_add_fence_bo(struct anv_queue_submit *submit,
                              struct anv_bo *bo,
                              bool signal)
{
   if (submit->fence_bo_count >= submit->fence_bo_array_length) {
      uint32_t new_len = MAX2(submit->fence_bo_array_length * 2, 64);
      /* Use a temporary so that, if the realloc fails, the old array is
       * still around for anv_queue_submit_free() to clean up.
       */
      uintptr_t *new_fence_bos =
         vk_realloc(submit->alloc,
                    submit->fence_bos, new_len * sizeof(*submit->fence_bos),
                    8, submit->alloc_scope);
      if (new_fence_bos == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      submit->fence_bos = new_fence_bos;
      submit->fence_bo_array_length = new_len;
   }

   /* Take advantage of the fact that anv_bo objects are allocated with an
    * 8 byte alignment, so we can use the lowest bit to store whether this
    * is a BO we need to signal.
    */
   submit->fence_bos[submit->fence_bo_count++] = anv_pack_ptr(bo, 1, signal);

   return VK_SUCCESS;
}
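
/* A minimal sketch of the low-bit pointer packing used above, assuming
 * anv_pack_ptr() follows the usual bit-stuffing idiom (the helper names
 * below are illustrative, not driver API).  Because anv_bo allocations are
 * 8 byte aligned, the low bits of the pointer are zero and bit 0 is free
 * to carry the signal flag.
 */
static UNUSED uintptr_t
example_pack_bo(struct anv_bo *bo, bool signal)
{
   assert(((uintptr_t)bo & 1) == 0);
   return (uintptr_t)bo | (signal ? 1 : 0);
}

static UNUSED struct anv_bo *
example_unpack_bo(uintptr_t packed, bool *signal)
{
   *signal = packed & 1;
   return (struct anv_bo *)(packed & ~(uintptr_t)1);
}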

static VkResult
anv_queue_submit_add_syncobj(struct anv_queue_submit* submit,
                             struct anv_device *device,
                             uint32_t handle, uint32_t flags)
{
   assert(flags != 0);

   if (submit->fence_count >= submit->fence_array_length) {
      uint32_t new_len = MAX2(submit->fence_array_length * 2, 64);
      /* As above, keep the old array around in case the realloc fails. */
      struct drm_i915_gem_exec_fence *new_fences =
         vk_realloc(submit->alloc,
                    submit->fences, new_len * sizeof(*submit->fences),
                    8, submit->alloc_scope);
      if (new_fences == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      submit->fences = new_fences;
      submit->fence_array_length = new_len;
   }

   submit->fences[submit->fence_count++] = (struct drm_i915_gem_exec_fence) {
      .handle = handle,
      .flags = flags,
   };

   return VK_SUCCESS;
}

static VkResult
anv_queue_submit_add_sync_fd_fence(struct anv_queue_submit *submit,
                                   struct anv_semaphore *semaphore)
{
   if (submit->sync_fd_semaphore_count >= submit->sync_fd_semaphore_array_length) {
      uint32_t new_len = MAX2(submit->sync_fd_semaphore_array_length * 2, 64);
      struct anv_semaphore **new_semaphores =
         vk_realloc(submit->alloc, submit->sync_fd_semaphores,
                    new_len * sizeof(*submit->sync_fd_semaphores), 8,
                    submit->alloc_scope);
      if (new_semaphores == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      submit->sync_fd_semaphores = new_semaphores;
      submit->sync_fd_semaphore_array_length = new_len;
   }

   submit->sync_fd_semaphores[submit->sync_fd_semaphore_count++] =
      anv_semaphore_ref(semaphore);
   submit->need_out_fence = true;

   return VK_SUCCESS;
}

static VkResult
anv_queue_submit_add_timeline_wait(struct anv_queue_submit* submit,
                                   struct anv_device *device,
                                   struct anv_timeline *timeline,
                                   uint64_t value)
{
   if (submit->wait_timeline_count >= submit->wait_timeline_array_length) {
      uint32_t new_len = MAX2(submit->wait_timeline_array_length * 2, 64);
      /* Use temporaries so a failed realloc leaves the old arrays intact
       * for cleanup.
       */
      struct anv_timeline **new_timelines =
         vk_realloc(submit->alloc,
                    submit->wait_timelines, new_len * sizeof(*submit->wait_timelines),
                    8, submit->alloc_scope);
      if (new_timelines == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      submit->wait_timelines = new_timelines;

      uint64_t *new_values =
         vk_realloc(submit->alloc,
                    submit->wait_timeline_values, new_len * sizeof(*submit->wait_timeline_values),
                    8, submit->alloc_scope);
      if (new_values == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      submit->wait_timeline_values = new_values;

      submit->wait_timeline_array_length = new_len;
   }

   submit->wait_timelines[submit->wait_timeline_count] = timeline;
   submit->wait_timeline_values[submit->wait_timeline_count] = value;

   submit->wait_timeline_count++;

   return VK_SUCCESS;
}

static VkResult
anv_queue_submit_add_timeline_signal(struct anv_queue_submit* submit,
                                     struct anv_device *device,
                                     struct anv_timeline *timeline,
                                     uint64_t value)
{
   assert(timeline->highest_pending < value);

   if (submit->signal_timeline_count >= submit->signal_timeline_array_length) {
      uint32_t new_len = MAX2(submit->signal_timeline_array_length * 2, 64);
      /* Same temporary-pointer dance as for the wait arrays above. */
      struct anv_timeline **new_timelines =
         vk_realloc(submit->alloc,
                    submit->signal_timelines, new_len * sizeof(*submit->signal_timelines),
                    8, submit->alloc_scope);
      if (new_timelines == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      submit->signal_timelines = new_timelines;

      uint64_t *new_values =
         vk_realloc(submit->alloc,
                    submit->signal_timeline_values, new_len * sizeof(*submit->signal_timeline_values),
                    8, submit->alloc_scope);
      if (new_values == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      submit->signal_timeline_values = new_values;

      submit->signal_timeline_array_length = new_len;
   }

   submit->signal_timelines[submit->signal_timeline_count] = timeline;
   submit->signal_timeline_values[submit->signal_timeline_count] = value;

   submit->signal_timeline_count++;

   return VK_SUCCESS;
}
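
/* All of the add_* helpers above share the same amortized-doubling growth
 * idiom.  A hedged generic sketch of that pattern (this helper is
 * illustrative only, not part of the driver):
 */
static UNUSED VkResult
example_ensure_u64_capacity(struct anv_queue_submit *submit,
                            uint64_t **array, uint32_t *array_length,
                            uint32_t needed)
{
   if (needed <= *array_length)
      return VK_SUCCESS;

   /* Double the capacity (with a floor) so the total copying cost stays
    * linear in the number of appends, and keep the old array on failure.
    */
   uint32_t new_len = MAX2(*array_length * 2, 64);
   uint64_t *new_array =
      vk_realloc(submit->alloc, *array, new_len * sizeof(**array),
                 8, submit->alloc_scope);
   if (new_array == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   *array = new_array;
   *array_length = new_len;
   return VK_SUCCESS;
}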

static struct anv_queue_submit *
anv_queue_submit_alloc(struct anv_device *device)
{
   const VkAllocationCallbacks *alloc = &device->alloc;
   VkSystemAllocationScope alloc_scope = VK_SYSTEM_ALLOCATION_SCOPE_DEVICE;

   struct anv_queue_submit *submit = vk_zalloc(alloc, sizeof(*submit), 8, alloc_scope);
   if (!submit)
      return NULL;

   submit->alloc = alloc;
   submit->alloc_scope = alloc_scope;
   submit->in_fence = -1;
   submit->out_fence = -1;

   return submit;
}

VkResult
anv_queue_submit_simple_batch(struct anv_queue *queue,
                              struct anv_batch *batch)
{
   struct anv_device *device = queue->device;
   struct anv_queue_submit *submit = anv_queue_submit_alloc(device);
   if (!submit)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   bool has_syncobj_wait = device->instance->physicalDevice.has_syncobj_wait;
   VkResult result;
   uint32_t syncobj;
   struct anv_bo *batch_bo, *sync_bo;

   if (has_syncobj_wait) {
      syncobj = anv_gem_syncobj_create(device, 0);
      if (!syncobj) {
         result = vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
         goto err_free_submit;
      }

      result = anv_queue_submit_add_syncobj(submit, device, syncobj,
                                            I915_EXEC_FENCE_SIGNAL);
   } else {
      result = anv_device_alloc_bo(device, 4096,
                                   ANV_BO_ALLOC_EXTERNAL |
                                   ANV_BO_ALLOC_IMPLICIT_SYNC,
                                   0 /* explicit_address */,
                                   &sync_bo);
      if (result != VK_SUCCESS)
         goto err_free_submit;

      result = anv_queue_submit_add_fence_bo(submit, sync_bo, true /* signal */);
   }

   if (result != VK_SUCCESS)
      goto err_destroy_sync_primitive;

   if (batch) {
      uint32_t size = align_u32(batch->next - batch->start, 8);
      result = anv_bo_pool_alloc(&device->batch_bo_pool, size, &batch_bo);
      if (result != VK_SUCCESS)
         goto err_destroy_sync_primitive;

      memcpy(batch_bo->map, batch->start, size);
      if (!device->info.has_llc)
         gen_flush_range(batch_bo->map, size);

      submit->simple_bo = batch_bo;
      submit->simple_bo_size = size;
   }

   result = _anv_queue_submit(queue, &submit);

   if (result == VK_SUCCESS) {
      if (has_syncobj_wait) {
         if (anv_gem_syncobj_wait(device, &syncobj, 1,
                                  anv_get_absolute_timeout(INT64_MAX), true))
            result = anv_device_set_lost(device, "anv_gem_syncobj_wait failed: %m");
         anv_gem_syncobj_destroy(device, syncobj);
      } else {
         result = anv_device_wait(device, sync_bo,
                                  anv_get_relative_timeout(INT64_MAX));
         anv_device_release_bo(device, sync_bo);
      }
   }

   if (batch)
      anv_bo_pool_free(&device->batch_bo_pool, batch_bo);

   if (submit)
      anv_queue_submit_free(device, submit);

   return result;

err_destroy_sync_primitive:
   if (has_syncobj_wait)
      anv_gem_syncobj_destroy(device, syncobj);
   else
      anv_device_release_bo(device, sync_bo);
err_free_submit:
   if (submit)
      anv_queue_submit_free(device, submit);

   return result;
}

/* Transfer ownership of temporary semaphores from the VkSemaphore object to
 * the anv_queue_submit object.  Those temporary semaphores are then freed in
 * anv_queue_submit_free() once the driver is finished with them.
 */
static VkResult
maybe_transfer_temporary_semaphore(struct anv_queue_submit *submit,
                                   struct anv_semaphore *semaphore,
                                   struct anv_semaphore_impl **out_impl)
{
   struct anv_semaphore_impl *impl = &semaphore->temporary;

   if (impl->type == ANV_SEMAPHORE_TYPE_NONE) {
      *out_impl = &semaphore->permanent;
      return VK_SUCCESS;
   }

   /* BO backed timeline semaphores cannot be temporary. */
   assert(impl->type != ANV_SEMAPHORE_TYPE_TIMELINE);

   /*
    * There is a requirement to reset semaphores to their permanent state
    * after submission.  From the Vulkan 1.0.53 spec:
    *
    *    "If the import is temporary, the implementation must restore the
    *    semaphore to its prior permanent state after submitting the next
    *    semaphore wait operation."
    *
    * In the case where we defer the actual submission because of the
    * wait-before-submit behavior required for timeline semaphores, we need
    * to make copies of the temporary syncobj to ensure they stay alive until
    * we do the actual execbuffer ioctl.
    */
   if (submit->temporary_semaphore_count >= submit->temporary_semaphore_array_length) {
      uint32_t new_len = MAX2(submit->temporary_semaphore_array_length * 2, 8);
      /* Make sure that if the realloc fails, we still have the old semaphore
       * array around to properly clean things up on failure.
       */
      struct anv_semaphore_impl *new_array =
         vk_realloc(submit->alloc,
                    submit->temporary_semaphores,
                    new_len * sizeof(*submit->temporary_semaphores),
                    8, submit->alloc_scope);
      if (new_array == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      submit->temporary_semaphores = new_array;
      submit->temporary_semaphore_array_length = new_len;
   }

   /* Copy anv_semaphore_impl into anv_queue_submit. */
   submit->temporary_semaphores[submit->temporary_semaphore_count++] = *impl;
   *out_impl = &submit->temporary_semaphores[submit->temporary_semaphore_count - 1];

   /* Clear the incoming semaphore. */
   impl->type = ANV_SEMAPHORE_TYPE_NONE;

   return VK_SUCCESS;
}
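
/* A hedged application-style sketch of the temporary-import semantics quoted
 * above: importing a payload with the TEMPORARY bit only overrides the
 * semaphore until the next wait, after which the permanent payload is
 * restored.  The struct and flag names are the real
 * VK_KHR_external_semaphore_fd API; the helper itself is illustrative, not
 * driver API.  (anv_ImportSemaphoreFdKHR is implemented further down in this
 * file; its prototype comes from the generated entrypoint headers.)
 */
static UNUSED VkResult
example_import_temporary_sync_fd(VkDevice device, VkSemaphore semaphore,
                                 int fd)
{
   const VkImportSemaphoreFdInfoKHR import_info = {
      .sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR,
      .semaphore = semaphore,
      .flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT,
      .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
      .fd = fd,
   };
   /* Ownership of fd transfers to the implementation on success. */
   return anv_ImportSemaphoreFdKHR(device, &import_info);
}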

static VkResult
anv_queue_submit(struct anv_queue *queue,
                 struct anv_cmd_buffer *cmd_buffer,
                 const VkSemaphore *in_semaphores,
                 const uint64_t *in_values,
                 uint32_t num_in_semaphores,
                 const VkSemaphore *out_semaphores,
                 const uint64_t *out_values,
                 uint32_t num_out_semaphores,
                 struct anv_bo *wsi_signal_bo,
                 VkFence _fence)
{
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   struct anv_device *device = queue->device;
   UNUSED struct anv_physical_device *pdevice = &device->instance->physicalDevice;
   struct anv_queue_submit *submit = anv_queue_submit_alloc(device);
   if (!submit)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   submit->cmd_buffer = cmd_buffer;

   VkResult result = VK_SUCCESS;

   for (uint32_t i = 0; i < num_in_semaphores; i++) {
      ANV_FROM_HANDLE(anv_semaphore, semaphore, in_semaphores[i]);
      struct anv_semaphore_impl *impl;

      result = maybe_transfer_temporary_semaphore(submit, semaphore, &impl);
      if (result != VK_SUCCESS)
         goto error;

      switch (impl->type) {
      case ANV_SEMAPHORE_TYPE_BO:
         assert(!pdevice->has_syncobj);
         result = anv_queue_submit_add_fence_bo(submit, impl->bo, false /* signal */);
         if (result != VK_SUCCESS)
            goto error;
         break;

      case ANV_SEMAPHORE_TYPE_SYNC_FILE:
         assert(!pdevice->has_syncobj);
         if (submit->in_fence == -1) {
            submit->in_fence = impl->fd;
            if (submit->in_fence == -1) {
               result = vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
               goto error;
            }
            impl->fd = -1;
         } else {
            int merge = anv_gem_sync_file_merge(device, submit->in_fence, impl->fd);
            if (merge == -1) {
               result = vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
               goto error;
            }
            close(impl->fd);
            close(submit->in_fence);
            impl->fd = -1;
            submit->in_fence = merge;
         }
         break;

      case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ: {
         result = anv_queue_submit_add_syncobj(submit, device,
                                               impl->syncobj,
                                               I915_EXEC_FENCE_WAIT);
         if (result != VK_SUCCESS)
            goto error;
         break;
      }

      case ANV_SEMAPHORE_TYPE_TIMELINE:
         result = anv_queue_submit_add_timeline_wait(submit, device,
                                                     &impl->timeline,
                                                     in_values ? in_values[i] : 0);
         if (result != VK_SUCCESS)
            goto error;
         break;

      default:
         break;
      }
   }

   for (uint32_t i = 0; i < num_out_semaphores; i++) {
      ANV_FROM_HANDLE(anv_semaphore, semaphore, out_semaphores[i]);

      /* Under most circumstances, out semaphores won't be temporary.
       * However, the spec does allow it for opaque_fd.  From the Vulkan
       * 1.0.53 spec:
       *
       *    "If the import is temporary, the implementation must restore the
       *    semaphore to its prior permanent state after submitting the next
       *    semaphore wait operation."
       *
       * The spec says nothing whatsoever about signal operations on
       * temporarily imported semaphores so it appears they are allowed.
       * There are also CTS tests that require this to work.
       */
      struct anv_semaphore_impl *impl =
         semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
         &semaphore->temporary : &semaphore->permanent;

      switch (impl->type) {
      case ANV_SEMAPHORE_TYPE_BO:
         assert(!pdevice->has_syncobj);
         result = anv_queue_submit_add_fence_bo(submit, impl->bo, true /* signal */);
         if (result != VK_SUCCESS)
            goto error;
         break;

      case ANV_SEMAPHORE_TYPE_SYNC_FILE:
         assert(!pdevice->has_syncobj);
         result = anv_queue_submit_add_sync_fd_fence(submit, semaphore);
         if (result != VK_SUCCESS)
            goto error;
         break;

      case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ: {
         result = anv_queue_submit_add_syncobj(submit, device, impl->syncobj,
                                               I915_EXEC_FENCE_SIGNAL);
         if (result != VK_SUCCESS)
            goto error;
         break;
      }

      case ANV_SEMAPHORE_TYPE_TIMELINE:
         result = anv_queue_submit_add_timeline_signal(submit, device,
                                                       &impl->timeline,
                                                       out_values ? out_values[i] : 0);
         if (result != VK_SUCCESS)
            goto error;
         break;

      default:
         break;
      }
   }

   if (wsi_signal_bo) {
      result = anv_queue_submit_add_fence_bo(submit, wsi_signal_bo, true /* signal */);
      if (result != VK_SUCCESS)
         goto error;
   }

   if (fence) {
      /* Under most circumstances, out fences won't be temporary.  However,
       * the spec does allow it for opaque_fd.  From the Vulkan 1.0.53 spec:
       *
       *    "If the import is temporary, the implementation must restore the
       *    semaphore to its prior permanent state after submitting the next
       *    semaphore wait operation."
       *
       * The spec says nothing whatsoever about signal operations on
       * temporarily imported semaphores so it appears they are allowed.
       * There are also CTS tests that require this to work.
       */
      struct anv_fence_impl *impl =
         fence->temporary.type != ANV_FENCE_TYPE_NONE ?
         &fence->temporary : &fence->permanent;

      switch (impl->type) {
      case ANV_FENCE_TYPE_BO:
         result = anv_queue_submit_add_fence_bo(submit, impl->bo.bo, true /* signal */);
         if (result != VK_SUCCESS)
            goto error;
         break;

      case ANV_FENCE_TYPE_SYNCOBJ: {
         /*
          * For the same reason we reset the signaled binary syncobj above,
          * also reset the fence's syncobj so that they don't contain a
          * signaled dma-fence.
          */
         result = anv_queue_submit_add_syncobj(submit, device, impl->syncobj,
                                               I915_EXEC_FENCE_SIGNAL);
         if (result != VK_SUCCESS)
            goto error;
         break;
      }

      default:
         unreachable("Invalid fence type");
      }
   }

   result = _anv_queue_submit(queue, &submit);
   if (result != VK_SUCCESS)
      goto error;

   if (fence && fence->permanent.type == ANV_FENCE_TYPE_BO) {
      /* BO fences can't be shared, so they can't be temporary. */
      assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);

      /* Once the execbuf has returned, we need to set the fence state to
       * SUBMITTED.  We can't do this before calling execbuf because
       * anv_GetFenceStatus does take the global device lock before checking
       * fence->state.
       *
       * We set the fence state to SUBMITTED regardless of whether or not the
       * execbuf succeeds because we need to ensure that vkWaitForFences() and
       * vkGetFenceStatus() return a valid result (VK_ERROR_DEVICE_LOST or
       * VK_SUCCESS) in a finite amount of time even if execbuf fails.
       */
      fence->permanent.bo.state = ANV_BO_FENCE_STATE_SUBMITTED;
   }

error:
   if (submit)
      anv_queue_submit_free(device, submit);

   return result;
}

VkResult anv_QueueSubmit(
    VkQueue                                     _queue,
    uint32_t                                    submitCount,
    const VkSubmitInfo*                         pSubmits,
    VkFence                                     fence)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);

   /* Query for device status prior to submitting.  Technically, we don't
    * need to do this.  However, if we have a client that's submitting piles
    * of garbage, we would rather break as early as possible to keep the GPU
    * hang contained.  If we don't check here, we'll either be waiting for
    * the kernel to kick us or we'll have to wait until the client waits on a
    * fence before we actually know whether or not we've hung.
    */
   VkResult result = anv_device_query_status(queue->device);
   if (result != VK_SUCCESS)
      return result;

   if (fence && submitCount == 0) {
      /* If we don't have any command buffers, we need to submit a dummy
       * batch to give GEM something to wait on.  We could, potentially,
       * come up with something more efficient but this shouldn't be a
       * common case.
       */
      result = anv_queue_submit(queue, NULL, NULL, NULL, 0, NULL, NULL, 0,
                                NULL, fence);
      goto out;
   }

   for (uint32_t i = 0; i < submitCount; i++) {
      /* Fence for this submit.  NULL for all but the last one */
      VkFence submit_fence = (i == submitCount - 1) ? fence : VK_NULL_HANDLE;

      const struct wsi_memory_signal_submit_info *mem_signal_info =
         vk_find_struct_const(pSubmits[i].pNext,
                              WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA);
      struct anv_bo *wsi_signal_bo =
         mem_signal_info && mem_signal_info->memory != VK_NULL_HANDLE ?
         anv_device_memory_from_handle(mem_signal_info->memory)->bo : NULL;

      const VkTimelineSemaphoreSubmitInfoKHR *timeline_info =
         vk_find_struct_const(pSubmits[i].pNext,
                              TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR);
      const uint64_t *wait_values =
         timeline_info && timeline_info->waitSemaphoreValueCount ?
         timeline_info->pWaitSemaphoreValues : NULL;
      const uint64_t *signal_values =
         timeline_info && timeline_info->signalSemaphoreValueCount ?
         timeline_info->pSignalSemaphoreValues : NULL;

      if (pSubmits[i].commandBufferCount == 0) {
         /* If we don't have any command buffers, we need to submit a dummy
          * batch to give GEM something to wait on.  We could, potentially,
          * come up with something more efficient but this shouldn't be a
          * common case.
          */
         result = anv_queue_submit(queue, NULL,
                                   pSubmits[i].pWaitSemaphores,
                                   wait_values,
                                   pSubmits[i].waitSemaphoreCount,
                                   pSubmits[i].pSignalSemaphores,
                                   signal_values,
                                   pSubmits[i].signalSemaphoreCount,
                                   wsi_signal_bo,
                                   submit_fence);
         if (result != VK_SUCCESS)
            goto out;

         continue;
      }

      for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
         ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer,
                         pSubmits[i].pCommandBuffers[j]);
         assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
         assert(!anv_batch_has_error(&cmd_buffer->batch));

         /* Fence for this execbuf.  NULL for all but the last one */
         VkFence execbuf_fence =
            (j == pSubmits[i].commandBufferCount - 1) ?
            submit_fence : VK_NULL_HANDLE;

         const VkSemaphore *in_semaphores = NULL, *out_semaphores = NULL;
         const uint64_t *in_values = NULL, *out_values = NULL;
         uint32_t num_in_semaphores = 0, num_out_semaphores = 0;
         if (j == 0) {
            /* Only the first batch gets the in semaphores */
            in_semaphores = pSubmits[i].pWaitSemaphores;
            in_values = wait_values;
            num_in_semaphores = pSubmits[i].waitSemaphoreCount;
         }

         if (j == pSubmits[i].commandBufferCount - 1) {
            /* Only the last batch gets the out semaphores */
            out_semaphores = pSubmits[i].pSignalSemaphores;
            out_values = signal_values;
            num_out_semaphores = pSubmits[i].signalSemaphoreCount;
         }

         result = anv_queue_submit(queue, cmd_buffer,
                                   in_semaphores, in_values, num_in_semaphores,
                                   out_semaphores, out_values, num_out_semaphores,
                                   wsi_signal_bo, execbuf_fence);
         if (result != VK_SUCCESS)
            goto out;
      }
   }

out:
   if (result != VK_SUCCESS && result != VK_ERROR_DEVICE_LOST) {
      /* In the case that something has gone wrong we may end up with an
       * inconsistent state from which it may not be trivial to recover.
       * For example, we might have computed address relocations and
       * any future attempt to re-submit this job will need to know about
       * this and avoid computing relocation addresses again.
       *
       * To avoid this sort of issue, we assume that if something went
       * wrong during submission we must already be in a really bad situation
       * anyway (such as being out of memory) and return
       * VK_ERROR_DEVICE_LOST to ensure that clients do not attempt to
       * submit the same job again to this device.
       *
       * We skip doing this on VK_ERROR_DEVICE_LOST because
       * anv_device_set_lost() would have been called already by a callee of
       * anv_queue_submit().
       */
      result = anv_device_set_lost(queue->device, "vkQueueSubmit() failed");
   }

   return result;
}
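
/* A hedged application-side sketch of a timeline submission that exercises
 * the pNext chain parsed by anv_QueueSubmit() above.  The structure and enum
 * names are the real VK_KHR_timeline_semaphore API; the helper itself is
 * illustrative, not driver API.
 */
static UNUSED VkResult
example_timeline_queue_submit(VkQueue queue, VkCommandBuffer cmd,
                              VkSemaphore timeline, uint64_t wait_value,
                              uint64_t signal_value)
{
   const VkTimelineSemaphoreSubmitInfoKHR timeline_info = {
      .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR,
      .waitSemaphoreValueCount = 1,
      .pWaitSemaphoreValues = &wait_value,
      .signalSemaphoreValueCount = 1,
      .pSignalSemaphoreValues = &signal_value,
   };
   const VkPipelineStageFlags wait_stage = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
   const VkSubmitInfo submit_info = {
      .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
      .pNext = &timeline_info,
      .waitSemaphoreCount = 1,
      .pWaitSemaphores = &timeline,
      .pWaitDstStageMask = &wait_stage,
      .commandBufferCount = 1,
      .pCommandBuffers = &cmd,
      .signalSemaphoreCount = 1,
      .pSignalSemaphores = &timeline,
   };
   /* Waiting on wait_value and signaling signal_value on the same timeline
    * is legal as long as signal_value is greater than wait_value.
    */
   return anv_QueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
}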

VkResult anv_QueueWaitIdle(
    VkQueue                                     _queue)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);

   if (anv_device_is_lost(queue->device))
      return VK_ERROR_DEVICE_LOST;

   return anv_queue_submit_simple_batch(queue, NULL);
}

VkResult anv_CreateFence(
    VkDevice                                    _device,
    const VkFenceCreateInfo*                    pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkFence*                                    pFence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_fence *fence;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);

   fence = vk_zalloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (fence == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (device->instance->physicalDevice.has_syncobj_wait) {
      fence->permanent.type = ANV_FENCE_TYPE_SYNCOBJ;

      uint32_t create_flags = 0;
      if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT)
         create_flags |= DRM_SYNCOBJ_CREATE_SIGNALED;

      fence->permanent.syncobj = anv_gem_syncobj_create(device, create_flags);
      if (!fence->permanent.syncobj) {
         /* Don't leak the fence allocation on failure. */
         vk_free2(&device->alloc, pAllocator, fence);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }
   } else {
      fence->permanent.type = ANV_FENCE_TYPE_BO;

      VkResult result = anv_bo_pool_alloc(&device->batch_bo_pool, 4096,
                                          &fence->permanent.bo.bo);
      if (result != VK_SUCCESS) {
         vk_free2(&device->alloc, pAllocator, fence);
         return result;
      }

      if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
         fence->permanent.bo.state = ANV_BO_FENCE_STATE_SIGNALED;
      } else {
         fence->permanent.bo.state = ANV_BO_FENCE_STATE_RESET;
      }
   }

   *pFence = anv_fence_to_handle(fence);

   return VK_SUCCESS;
}

static void
anv_fence_impl_cleanup(struct anv_device *device,
                       struct anv_fence_impl *impl)
{
   switch (impl->type) {
   case ANV_FENCE_TYPE_NONE:
      /* Dummy.  Nothing to do */
      break;

   case ANV_FENCE_TYPE_BO:
      anv_bo_pool_free(&device->batch_bo_pool, impl->bo.bo);
      break;

   case ANV_FENCE_TYPE_SYNCOBJ:
      anv_gem_syncobj_destroy(device, impl->syncobj);
      break;

   case ANV_FENCE_TYPE_WSI:
      impl->fence_wsi->destroy(impl->fence_wsi);
      break;

   default:
      unreachable("Invalid fence type");
   }

   impl->type = ANV_FENCE_TYPE_NONE;
}

void
anv_fence_reset_temporary(struct anv_device *device,
                          struct anv_fence *fence)
{
   if (fence->temporary.type == ANV_FENCE_TYPE_NONE)
      return;

   anv_fence_impl_cleanup(device, &fence->temporary);
}

void anv_DestroyFence(
    VkDevice                                    _device,
    VkFence                                     _fence,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (!fence)
      return;

   anv_fence_impl_cleanup(device, &fence->temporary);
   anv_fence_impl_cleanup(device, &fence->permanent);

   vk_free2(&device->alloc, pAllocator, fence);
}

VkResult anv_ResetFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);

      /* From the Vulkan 1.0.53 spec:
       *
       *    "If any member of pFences currently has its payload imported with
       *    temporary permanence, that fence’s prior permanent payload is
       *    first restored.  The remaining operations described therefore
       *    operate on the restored payload."
       */
      anv_fence_reset_temporary(device, fence);

      struct anv_fence_impl *impl = &fence->permanent;

      switch (impl->type) {
      case ANV_FENCE_TYPE_BO:
         impl->bo.state = ANV_BO_FENCE_STATE_RESET;
         break;

      case ANV_FENCE_TYPE_SYNCOBJ:
         anv_gem_syncobj_reset(device, impl->syncobj);
         break;

      default:
         unreachable("Invalid fence type");
      }
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceStatus(
    VkDevice                                    _device,
    VkFence                                     _fence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   struct anv_fence_impl *impl =
      fence->temporary.type != ANV_FENCE_TYPE_NONE ?
      &fence->temporary : &fence->permanent;

   switch (impl->type) {
   case ANV_FENCE_TYPE_BO:
      /* BO fences don't support import/export */
      assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
      switch (impl->bo.state) {
      case ANV_BO_FENCE_STATE_RESET:
         /* If it hasn't even been sent off to the GPU yet, it's not ready */
         return VK_NOT_READY;

      case ANV_BO_FENCE_STATE_SIGNALED:
         /* It's been signaled, return success */
         return VK_SUCCESS;

      case ANV_BO_FENCE_STATE_SUBMITTED: {
         VkResult result = anv_device_bo_busy(device, impl->bo.bo);
         if (result == VK_SUCCESS) {
            impl->bo.state = ANV_BO_FENCE_STATE_SIGNALED;
            return VK_SUCCESS;
         } else {
            return result;
         }
      }
      default:
         unreachable("Invalid fence status");
      }

   case ANV_FENCE_TYPE_SYNCOBJ: {
      int ret = anv_gem_syncobj_wait(device, &impl->syncobj, 1, 0, true);
      if (ret == -1) {
         if (errno == ETIME) {
            return VK_NOT_READY;
         } else {
            /* We don't know the real error. */
            return anv_device_set_lost(device, "drm_syncobj_wait failed: %m");
         }
      } else {
         return VK_SUCCESS;
      }
   }

   default:
      unreachable("Invalid fence type");
   }
}

static VkResult
anv_wait_for_syncobj_fences(struct anv_device *device,
                            uint32_t fenceCount,
                            const VkFence *pFences,
                            bool waitAll,
                            uint64_t abs_timeout_ns)
{
   uint32_t *syncobjs = vk_zalloc(&device->alloc,
                                  sizeof(*syncobjs) * fenceCount, 8,
                                  VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!syncobjs)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      assert(fence->permanent.type == ANV_FENCE_TYPE_SYNCOBJ);

      struct anv_fence_impl *impl =
         fence->temporary.type != ANV_FENCE_TYPE_NONE ?
         &fence->temporary : &fence->permanent;

      assert(impl->type == ANV_FENCE_TYPE_SYNCOBJ);
      syncobjs[i] = impl->syncobj;
   }

   /* The gem_syncobj_wait ioctl may return early due to an inherent
    * limitation in the way it computes timeouts.  Loop until we've actually
    * passed the timeout.
    */
   int ret;
   do {
      ret = anv_gem_syncobj_wait(device, syncobjs, fenceCount,
                                 abs_timeout_ns, waitAll);
   } while (ret == -1 && errno == ETIME && anv_gettime_ns() < abs_timeout_ns);

   vk_free(&device->alloc, syncobjs);

   if (ret == -1) {
      if (errno == ETIME) {
         return VK_TIMEOUT;
      } else {
         /* We don't know the real error. */
         return anv_device_set_lost(device, "drm_syncobj_wait failed: %m");
      }
   } else {
      return VK_SUCCESS;
   }
}

static VkResult
anv_wait_for_bo_fences(struct anv_device *device,
                       uint32_t fenceCount,
                       const VkFence *pFences,
                       bool waitAll,
                       uint64_t abs_timeout_ns)
{
   VkResult result = VK_SUCCESS;
   uint32_t pending_fences = fenceCount;
   while (pending_fences) {
      pending_fences = 0;
      bool signaled_fences = false;
      for (uint32_t i = 0; i < fenceCount; i++) {
         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);

         /* This function assumes that all fences are BO fences and that they
          * have no temporary state.  Since BO fences will never be exported,
          * this should be a safe assumption.
          */
         assert(fence->permanent.type == ANV_FENCE_TYPE_BO);
         assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
         struct anv_fence_impl *impl = &fence->permanent;

         switch (impl->bo.state) {
         case ANV_BO_FENCE_STATE_RESET:
            /* This fence hasn't been submitted yet, we'll catch it the next
             * time around.  Yes, this may mean we dead-loop but, short of
             * lots of locking and a condition variable, there's not much that
             * we can do about that.
             */
            pending_fences++;
            continue;

         case ANV_BO_FENCE_STATE_SIGNALED:
            /* This fence is not pending.  If waitAll isn't set, we can return
             * early.  Otherwise, we have to keep going.
             */
            if (!waitAll) {
               result = VK_SUCCESS;
               goto done;
            }
            continue;

         case ANV_BO_FENCE_STATE_SUBMITTED:
            /* These are the fences we really care about.  Go ahead and wait
             * on them until we hit a timeout.
             */
            result = anv_device_wait(device, impl->bo.bo,
                                     anv_get_relative_timeout(abs_timeout_ns));
            switch (result) {
            case VK_SUCCESS:
               impl->bo.state = ANV_BO_FENCE_STATE_SIGNALED;
               signaled_fences = true;
               if (!waitAll)
                  goto done;
               break;

            case VK_TIMEOUT:
               goto done;

            default:
               return result;
            }
         }
      }

      if (pending_fences && !signaled_fences) {
         /* If we've hit this then someone decided to vkWaitForFences before
          * they've actually submitted any of them to a queue.  This is a
          * fairly pessimal case, so it's ok to lock here and use a standard
          * pthreads condition variable.
          */
         pthread_mutex_lock(&device->mutex);

         /* It's possible that some of the fences have changed state since the
          * last time we checked.  Now that we have the lock, check for
          * pending fences again and don't wait if it's changed.
          */
         uint32_t now_pending_fences = 0;
         for (uint32_t i = 0; i < fenceCount; i++) {
            ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
            if (fence->permanent.bo.state == ANV_BO_FENCE_STATE_RESET)
               now_pending_fences++;
         }
         assert(now_pending_fences <= pending_fences);

         if (now_pending_fences == pending_fences) {
            struct timespec abstime = {
               .tv_sec = abs_timeout_ns / NSEC_PER_SEC,
               .tv_nsec = abs_timeout_ns % NSEC_PER_SEC,
            };

            ASSERTED int ret;
            ret = pthread_cond_timedwait(&device->queue_submit,
                                         &device->mutex, &abstime);
            assert(ret != EINVAL);
            if (anv_gettime_ns() >= abs_timeout_ns) {
               pthread_mutex_unlock(&device->mutex);
               result = VK_TIMEOUT;
               goto done;
            }
         }

         pthread_mutex_unlock(&device->mutex);
      }
   }

done:
   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   return result;
}

static VkResult
anv_wait_for_wsi_fence(struct anv_device *device,
                       const VkFence _fence,
                       uint64_t abs_timeout)
{
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   struct anv_fence_impl *impl = &fence->permanent;

   return impl->fence_wsi->wait(impl->fence_wsi, abs_timeout);
}

static VkResult
anv_wait_for_fences(struct anv_device *device,
                    uint32_t fenceCount,
                    const VkFence *pFences,
                    bool waitAll,
                    uint64_t abs_timeout)
{
   VkResult result = VK_SUCCESS;

   if (fenceCount <= 1 || waitAll) {
      for (uint32_t i = 0; i < fenceCount; i++) {
         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
         switch (fence->permanent.type) {
         case ANV_FENCE_TYPE_BO:
            result = anv_wait_for_bo_fences(device, 1, &pFences[i],
                                            true, abs_timeout);
            break;
         case ANV_FENCE_TYPE_SYNCOBJ:
            result = anv_wait_for_syncobj_fences(device, 1, &pFences[i],
                                                 true, abs_timeout);
            break;
         case ANV_FENCE_TYPE_WSI:
            result = anv_wait_for_wsi_fence(device, pFences[i], abs_timeout);
            break;
         case ANV_FENCE_TYPE_NONE:
            result = VK_SUCCESS;
            break;
         }
         if (result != VK_SUCCESS)
            return result;
      }
   } else {
      do {
         for (uint32_t i = 0; i < fenceCount; i++) {
            if (anv_wait_for_fences(device, 1, &pFences[i], true, 0) == VK_SUCCESS)
               return VK_SUCCESS;
         }
      } while (anv_gettime_ns() < abs_timeout);
      result = VK_TIMEOUT;
   }
   return result;
}

static bool anv_all_fences_syncobj(uint32_t fenceCount, const VkFence *pFences)
{
   for (uint32_t i = 0; i < fenceCount; ++i) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      if (fence->permanent.type != ANV_FENCE_TYPE_SYNCOBJ)
         return false;
   }
   return true;
}

static bool anv_all_fences_bo(uint32_t fenceCount, const VkFence *pFences)
{
   for (uint32_t i = 0; i < fenceCount; ++i) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      if (fence->permanent.type != ANV_FENCE_TYPE_BO)
         return false;
   }
   return true;
}

VkResult anv_WaitForFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences,
    VkBool32                                    waitAll,
    uint64_t                                    timeout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   uint64_t abs_timeout = anv_get_absolute_timeout(timeout);
   if (anv_all_fences_syncobj(fenceCount, pFences)) {
      return anv_wait_for_syncobj_fences(device, fenceCount, pFences,
                                         waitAll, abs_timeout);
   } else if (anv_all_fences_bo(fenceCount, pFences)) {
      return anv_wait_for_bo_fences(device, fenceCount, pFences,
                                    waitAll, abs_timeout);
   } else {
      return anv_wait_for_fences(device, fenceCount, pFences,
                                 waitAll, abs_timeout);
   }
}

void anv_GetPhysicalDeviceExternalFenceProperties(
    VkPhysicalDevice                            physicalDevice,
    const VkPhysicalDeviceExternalFenceInfo*    pExternalFenceInfo,
    VkExternalFenceProperties*                  pExternalFenceProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   switch (pExternalFenceInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
      if (device->has_syncobj_wait) {
         pExternalFenceProperties->exportFromImportedHandleTypes =
            VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT |
            VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalFenceProperties->compatibleHandleTypes =
            VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT |
            VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalFenceProperties->externalFenceFeatures =
            VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT;
         return;
      }
      break;

   default:
      break;
   }

   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

VkResult anv_ImportFenceFdKHR(
    VkDevice                                    _device,
    const VkImportFenceFdInfoKHR*               pImportFenceFdInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, pImportFenceFdInfo->fence);
   int fd = pImportFenceFdInfo->fd;

   assert(pImportFenceFdInfo->sType ==
          VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR);

   struct anv_fence_impl new_impl = {
      .type = ANV_FENCE_TYPE_NONE,
   };

   switch (pImportFenceFdInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
      new_impl.type = ANV_FENCE_TYPE_SYNCOBJ;

      new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
      if (!new_impl.syncobj)
         return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);

      break;

   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
      /* Sync files are a bit tricky.  Because we want to continue using the
       * syncobj implementation of WaitForFences, we don't use the sync file
       * directly but instead import it into a syncobj.
       */
      new_impl.type = ANV_FENCE_TYPE_SYNCOBJ;

      new_impl.syncobj = anv_gem_syncobj_create(device, 0);
      if (!new_impl.syncobj)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      if (anv_gem_syncobj_import_sync_file(device, new_impl.syncobj, fd)) {
         anv_gem_syncobj_destroy(device, new_impl.syncobj);
         return vk_errorf(device->instance, NULL,
                          VK_ERROR_INVALID_EXTERNAL_HANDLE,
                          "syncobj sync file import failed: %m");
      }
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Importing a fence payload from a file descriptor transfers
    *    ownership of the file descriptor from the application to the
    *    Vulkan implementation.  The application must not perform any
    *    operations on the file descriptor after a successful import."
    *
    * If the import fails, we leave the file descriptor open.
    */
   close(fd);

   if (pImportFenceFdInfo->flags & VK_FENCE_IMPORT_TEMPORARY_BIT) {
      anv_fence_impl_cleanup(device, &fence->temporary);
      fence->temporary = new_impl;
   } else {
      anv_fence_impl_cleanup(device, &fence->permanent);
      fence->permanent = new_impl;
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceFdKHR(
    VkDevice                                    _device,
    const VkFenceGetFdInfoKHR*                  pGetFdInfo,
    int*                                        pFd)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, pGetFdInfo->fence);

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR);

   struct anv_fence_impl *impl =
      fence->temporary.type != ANV_FENCE_TYPE_NONE ?
      &fence->temporary : &fence->permanent;

   assert(impl->type == ANV_FENCE_TYPE_SYNCOBJ);
   switch (pGetFdInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT: {
      int fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = fd;
      break;
   }

   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT: {
      int fd = anv_gem_syncobj_export_sync_file(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = fd;
      break;
   }

   default:
      unreachable("Invalid fence export handle type");
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Export operations have the same transference as the specified handle
    *    type’s import operations.  [...]  If the fence was using a
    *    temporarily imported payload, the fence’s prior permanent payload
    *    will be restored."
    */
   if (impl == &fence->temporary)
      anv_fence_impl_cleanup(device, impl);

   return VK_SUCCESS;
}

// Queue semaphore functions

static VkSemaphoreTypeKHR
get_semaphore_type(const void *pNext, uint64_t *initial_value)
{
   const VkSemaphoreTypeCreateInfoKHR *type_info =
      vk_find_struct_const(pNext, SEMAPHORE_TYPE_CREATE_INFO_KHR);

   if (!type_info)
      return VK_SEMAPHORE_TYPE_BINARY_KHR;

   if (initial_value)
      *initial_value = type_info->initialValue;
   return type_info->semaphoreType;
}

static VkResult
binary_semaphore_create(struct anv_device *device,
                        struct anv_semaphore_impl *impl,
                        bool exportable)
{
   if (device->instance->physicalDevice.has_syncobj) {
      impl->type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
      impl->syncobj = anv_gem_syncobj_create(device, 0);
      if (!impl->syncobj)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      return VK_SUCCESS;
   } else {
      impl->type = ANV_SEMAPHORE_TYPE_BO;
      VkResult result =
         anv_device_alloc_bo(device, 4096,
                             ANV_BO_ALLOC_EXTERNAL |
                             ANV_BO_ALLOC_IMPLICIT_SYNC,
                             0 /* explicit_address */,
                             &impl->bo);
      /* If we're going to use this as a fence, we need to *not* have the
       * EXEC_OBJECT_ASYNC bit set.
       */
      assert(!(impl->bo->flags & EXEC_OBJECT_ASYNC));
      return result;
   }
}

static VkResult
timeline_semaphore_create(struct anv_device *device,
                          struct anv_semaphore_impl *impl,
                          uint64_t initial_value)
{
   impl->type = ANV_SEMAPHORE_TYPE_TIMELINE;
   anv_timeline_init(device, &impl->timeline, initial_value);
   return VK_SUCCESS;
}
1719 VkResult anv_CreateSemaphore(
1720 VkDevice _device,
1721 const VkSemaphoreCreateInfo* pCreateInfo,
1722 const VkAllocationCallbacks* pAllocator,
1723 VkSemaphore* pSemaphore)
1724 {
1725 ANV_FROM_HANDLE(anv_device, device, _device);
1726 struct anv_semaphore *semaphore;
1727
1728 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO);
1729
1730 uint64_t timeline_value = 0;
1731 VkSemaphoreTypeKHR sem_type = get_semaphore_type(pCreateInfo->pNext, &timeline_value);
1732
1733 semaphore = vk_alloc(&device->alloc, sizeof(*semaphore), 8,
1734 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1735 if (semaphore == NULL)
1736 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1737
1738 p_atomic_set(&semaphore->refcount, 1);
1739
1740 const VkExportSemaphoreCreateInfo *export =
1741 vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO);
1742 VkExternalSemaphoreHandleTypeFlags handleTypes =
1743 export ? export->handleTypes : 0;
1744 VkResult result;
1745
1746 if (handleTypes == 0) {
1747 if (sem_type == VK_SEMAPHORE_TYPE_BINARY_KHR)
1748 result = binary_semaphore_create(device, &semaphore->permanent, false);
1749 else
1750 result = timeline_semaphore_create(device, &semaphore->permanent, timeline_value);
1751 if (result != VK_SUCCESS) {
1752 vk_free2(&device->alloc, pAllocator, semaphore);
1753 return result;
1754 }
1755 } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) {
1756 assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT);
1757 assert(sem_type == VK_SEMAPHORE_TYPE_BINARY_KHR);
1758 result = binary_semaphore_create(device, &semaphore->permanent, true);
1759 if (result != VK_SUCCESS) {
1760 vk_free2(&device->alloc, pAllocator, semaphore);
1761 return result;
1762 }
1763 } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
1764 assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT);
1765 assert(sem_type == VK_SEMAPHORE_TYPE_BINARY_KHR);
1766 if (device->instance->physicalDevice.has_syncobj) {
1767 semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
1768 semaphore->permanent.syncobj = anv_gem_syncobj_create(device, 0);
1769 if (!semaphore->permanent.syncobj) {
1770 vk_free2(&device->alloc, pAllocator, semaphore);
1771 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1772 }
1773 } else {
1774 semaphore->permanent.type = ANV_SEMAPHORE_TYPE_SYNC_FILE;
1775 semaphore->permanent.fd = -1;
1776 }
1777 } else {
1778 assert(!"Unknown handle type");
1779 vk_free2(&device->alloc, pAllocator, semaphore);
1780 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
1781 }
1782
1783 semaphore->temporary.type = ANV_SEMAPHORE_TYPE_NONE;
1784
1785 *pSemaphore = anv_semaphore_to_handle(semaphore);
1786
1787 return VK_SUCCESS;
1788 }
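
/* Illustrative application-side sketch (not driver code, assuming a valid
 * VkDevice device): the timeline path above is reached when the application
 * chains a VkSemaphoreTypeCreateInfoKHR into VkSemaphoreCreateInfo, which is
 * what makes get_semaphore_type() return VK_SEMAPHORE_TYPE_TIMELINE_KHR:
 *
 *    VkSemaphoreTypeCreateInfoKHR type_info = {
 *       .sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR,
 *       .semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE_KHR,
 *       .initialValue = 0,
 *    };
 *    VkSemaphoreCreateInfo create_info = {
 *       .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
 *       .pNext = &type_info,
 *    };
 *    VkSemaphore semaphore;
 *    VkResult res = vkCreateSemaphore(device, &create_info, NULL, &semaphore);
 */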

static void
anv_semaphore_impl_cleanup(struct anv_device *device,
                           struct anv_semaphore_impl *impl)
{
   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_NONE:
   case ANV_SEMAPHORE_TYPE_DUMMY:
      /* Dummy. Nothing to do */
      break;

   case ANV_SEMAPHORE_TYPE_BO:
      anv_device_release_bo(device, impl->bo);
      break;

   case ANV_SEMAPHORE_TYPE_SYNC_FILE:
      /* The FD may already have been consumed, e.g. by
       * anv_GetSemaphoreFdKHR(), in which case it is -1.
       */
      if (impl->fd >= 0)
         close(impl->fd);
      break;

   case ANV_SEMAPHORE_TYPE_TIMELINE:
      anv_timeline_finish(device, &impl->timeline);
      break;

   case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
      anv_gem_syncobj_destroy(device, impl->syncobj);
      break;

   default:
      unreachable("Invalid semaphore type");
   }

   impl->type = ANV_SEMAPHORE_TYPE_NONE;
}

void
anv_semaphore_reset_temporary(struct anv_device *device,
                              struct anv_semaphore *semaphore)
{
   if (semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE)
      return;

   anv_semaphore_impl_cleanup(device, &semaphore->temporary);
}

static struct anv_semaphore *
anv_semaphore_ref(struct anv_semaphore *semaphore)
{
   assert(semaphore->refcount);
   p_atomic_inc(&semaphore->refcount);
   return semaphore;
}

static void
anv_semaphore_unref(struct anv_device *device, struct anv_semaphore *semaphore)
{
   if (!p_atomic_dec_zero(&semaphore->refcount))
      return;

   anv_semaphore_impl_cleanup(device, &semaphore->temporary);
   anv_semaphore_impl_cleanup(device, &semaphore->permanent);
   vk_free(&device->alloc, semaphore);
}
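
/* Semaphores are reference-counted because the driver may need to keep an
 * anv_semaphore alive after the application has destroyed its handle; for
 * example, an in-flight queue submission can hold a reference until its
 * sync FD payload has been materialized.  vkDestroySemaphore() therefore
 * only drops a reference (see anv_DestroySemaphore() below).
 */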

void anv_DestroySemaphore(
    VkDevice                                    _device,
    VkSemaphore                                 _semaphore,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, _semaphore);

   if (semaphore == NULL)
      return;

   anv_semaphore_unref(device, semaphore);
}

void anv_GetPhysicalDeviceExternalSemaphoreProperties(
    VkPhysicalDevice                            physicalDevice,
    const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
    VkExternalSemaphoreProperties*              pExternalSemaphoreProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   VkSemaphoreTypeKHR sem_type =
      get_semaphore_type(pExternalSemaphoreInfo->pNext, NULL);

   switch (pExternalSemaphoreInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
      /* Timeline semaphores are not exportable. */
      if (sem_type == VK_SEMAPHORE_TYPE_TIMELINE_KHR)
         break;
      pExternalSemaphoreProperties->exportFromImportedHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
      pExternalSemaphoreProperties->compatibleHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
      pExternalSemaphoreProperties->externalSemaphoreFeatures =
         VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
         VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
      return;

   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
      if (sem_type == VK_SEMAPHORE_TYPE_TIMELINE_KHR)
         break;
      if (!device->has_exec_fence)
         break;
      pExternalSemaphoreProperties->exportFromImportedHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
      pExternalSemaphoreProperties->compatibleHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
      pExternalSemaphoreProperties->externalSemaphoreFeatures =
         VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
         VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
      return;

   default:
      break;
   }

   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}
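
/* Illustrative application-side sketch (not driver code): querying whether a
 * binary semaphore can be exported as a sync FD boils down to a call like
 * the following, which lands in the SYNC_FD case above:
 *
 *    VkPhysicalDeviceExternalSemaphoreInfo info = {
 *       .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO,
 *       .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
 *    };
 *    VkExternalSemaphoreProperties props = {
 *       .sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES,
 *    };
 *    vkGetPhysicalDeviceExternalSemaphoreProperties(physical_device,
 *                                                   &info, &props);
 *    bool exportable = props.externalSemaphoreFeatures &
 *                      VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT;
 */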

VkResult anv_ImportSemaphoreFdKHR(
    VkDevice                                    _device,
    const VkImportSemaphoreFdInfoKHR*           pImportSemaphoreFdInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pImportSemaphoreFdInfo->semaphore);
   int fd = pImportSemaphoreFdInfo->fd;

   struct anv_semaphore_impl new_impl = {
      .type = ANV_SEMAPHORE_TYPE_NONE,
   };

   switch (pImportSemaphoreFdInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
      if (device->instance->physicalDevice.has_syncobj) {
         new_impl.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;

         new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
         if (!new_impl.syncobj)
            return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
      } else {
         new_impl.type = ANV_SEMAPHORE_TYPE_BO;

         VkResult result = anv_device_import_bo(device, fd,
                                                ANV_BO_ALLOC_EXTERNAL |
                                                ANV_BO_ALLOC_IMPLICIT_SYNC,
                                                0 /* client_address */,
                                                &new_impl.bo);
         if (result != VK_SUCCESS)
            return result;

         if (new_impl.bo->size < 4096) {
            anv_device_release_bo(device, new_impl.bo);
            return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
         }

         /* If we're going to use this as a fence, we need to *not* have the
          * EXEC_OBJECT_ASYNC bit set.
          */
         assert(!(new_impl.bo->flags & EXEC_OBJECT_ASYNC));
      }

      /* From the Vulkan spec:
       *
       *    "Importing semaphore state from a file descriptor transfers
       *    ownership of the file descriptor from the application to the
       *    Vulkan implementation. The application must not perform any
       *    operations on the file descriptor after a successful import."
       *
       * If the import fails, we leave the file descriptor open.
       */
      close(fd);
      break;

   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
      if (device->instance->physicalDevice.has_syncobj) {
         new_impl = (struct anv_semaphore_impl) {
            .type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ,
            .syncobj = anv_gem_syncobj_create(device, 0),
         };
         if (!new_impl.syncobj)
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         if (anv_gem_syncobj_import_sync_file(device, new_impl.syncobj, fd)) {
            anv_gem_syncobj_destroy(device, new_impl.syncobj);
            return vk_errorf(device->instance, NULL,
                             VK_ERROR_INVALID_EXTERNAL_HANDLE,
                             "syncobj sync file import failed: %m");
         }
         /* Ownership of the FD is transferred to Anv.  Since we don't need
          * it anymore because the associated fence has been put into a
          * syncobj, we must close the FD.
          */
         close(fd);
      } else {
         new_impl = (struct anv_semaphore_impl) {
            .type = ANV_SEMAPHORE_TYPE_SYNC_FILE,
            .fd = fd,
         };
      }
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
      anv_semaphore_impl_cleanup(device, &semaphore->temporary);
      semaphore->temporary = new_impl;
   } else {
      anv_semaphore_impl_cleanup(device, &semaphore->permanent);
      semaphore->permanent = new_impl;
   }

   return VK_SUCCESS;
}
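
/* Illustrative application-side sketch (not driver code): a typical use of
 * the import path above is to pass a fence from one device or process to
 * another, e.g.:
 *
 *    // Exporting side
 *    VkSemaphoreGetFdInfoKHR get_info = {
 *       .sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR,
 *       .semaphore = src_semaphore,
 *       .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
 *    };
 *    int fd;
 *    vkGetSemaphoreFdKHR(device, &get_info, &fd);
 *
 *    // Importing side; ownership of fd transfers on success
 *    VkImportSemaphoreFdInfoKHR import_info = {
 *       .sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR,
 *       .semaphore = dst_semaphore,
 *       .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
 *       .fd = fd,
 *    };
 *    vkImportSemaphoreFdKHR(device, &import_info);
 */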

VkResult anv_GetSemaphoreFdKHR(
    VkDevice                                    _device,
    const VkSemaphoreGetFdInfoKHR*              pGetFdInfo,
    int*                                        pFd)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pGetFdInfo->semaphore);
   VkResult result;
   int fd;

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR);

   struct anv_semaphore_impl *impl =
      semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
      &semaphore->temporary : &semaphore->permanent;

   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_BO:
      result = anv_device_export_bo(device, impl->bo, pFd);
      if (result != VK_SUCCESS)
         return result;
      break;

   case ANV_SEMAPHORE_TYPE_SYNC_FILE: {
      /* There's a potential race here with vkQueueSubmit if you are trying
       * to export a semaphore FD while the queue submit is still happening.
       * This can happen if we see all dependencies get resolved via timeline
       * semaphore waits completing before the execbuf completes and we
       * process the resulting out fence.  To work around this, take a lock
       * around grabbing the fd.
       */
      pthread_mutex_lock(&device->mutex);

      /* From the Vulkan 1.0.53 spec:
       *
       *    "...exporting a semaphore payload to a handle with copy
       *    transference has the same side effects on the source
       *    semaphore’s payload as executing a semaphore wait operation."
       *
       * In other words, it may still be a SYNC_FD semaphore, but it's now
       * considered to have been waited on and no longer has a sync file
       * attached.
       */
      fd = impl->fd;
      impl->fd = -1;

      pthread_mutex_unlock(&device->mutex);

      /* There are two reasons why this could happen:
       *
       *    1) The user is trying to export without submitting something
       *       that signals the semaphore.  If this is the case, it's their
       *       bug so what we return here doesn't matter.
       *
       *    2) The kernel didn't give us a file descriptor.  The most likely
       *       reason for this is running out of file descriptors.
       */
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = fd;
      return VK_SUCCESS;
   }

   case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
      if (pGetFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
         fd = anv_gem_syncobj_export_sync_file(device, impl->syncobj);
      } else {
         assert(pGetFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT);
         fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
      }
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
      *pFd = fd;
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Export operations have the same transference as the specified
    *    handle type’s import operations. [...] If the semaphore was using a
    *    temporarily imported payload, the semaphore’s prior permanent
    *    payload will be restored."
    */
   if (impl == &semaphore->temporary)
      anv_semaphore_impl_cleanup(device, impl);

   return VK_SUCCESS;
}

VkResult anv_GetSemaphoreCounterValueKHR(
    VkDevice                                    _device,
    VkSemaphore                                 _semaphore,
    uint64_t*                                   pValue)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, _semaphore);

   struct anv_semaphore_impl *impl =
      semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
      &semaphore->temporary : &semaphore->permanent;

   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_TIMELINE: {
      pthread_mutex_lock(&device->mutex);
      *pValue = impl->timeline.highest_past;
      pthread_mutex_unlock(&device->mutex);
      return VK_SUCCESS;
   }

   default:
      unreachable("Invalid semaphore type");
   }
}

static VkResult
anv_timeline_wait_locked(struct anv_device *device,
                         struct anv_timeline *timeline,
                         uint64_t serial, uint64_t abs_timeout_ns)
{
   /* Wait on the queue_submit condition variable until the timeline has a
    * time point pending that's at least as high as serial.
    */
   while (timeline->highest_pending < serial) {
      struct timespec abstime = {
         .tv_sec = abs_timeout_ns / NSEC_PER_SEC,
         .tv_nsec = abs_timeout_ns % NSEC_PER_SEC,
      };
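
      /* The deadline above is based on anv_gettime_ns(), i.e.
       * CLOCK_MONOTONIC.  For pthread_cond_timedwait() to interpret it
       * correctly, this assumes device->queue_submit was initialized with
       * pthread_condattr_setclock(CLOCK_MONOTONIC); against the default
       * CLOCK_REALTIME the deadline would typically lie in the past and
       * this wait would degenerate into a spin.
       */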
      ASSERTED int ret = pthread_cond_timedwait(&device->queue_submit,
                                                &device->mutex, &abstime);
      assert(ret != EINVAL);
      if (anv_gettime_ns() >= abs_timeout_ns &&
          timeline->highest_pending < serial)
         return VK_TIMEOUT;
   }

   while (1) {
      VkResult result = anv_timeline_gc_locked(device, timeline);
      if (result != VK_SUCCESS)
         return result;

      if (timeline->highest_past >= serial)
         return VK_SUCCESS;

      /* If we got here, our earliest time point has a busy BO */
      struct anv_timeline_point *point =
         list_first_entry(&timeline->points,
                          struct anv_timeline_point, link);

      /* Drop the lock while we wait. */
      point->waiting++;
      pthread_mutex_unlock(&device->mutex);

      result = anv_device_wait(device, point->bo,
                               anv_get_relative_timeout(abs_timeout_ns));

      /* Pick the mutex back up */
      pthread_mutex_lock(&device->mutex);
      point->waiting--;

      /* This covers both VK_TIMEOUT and VK_ERROR_DEVICE_LOST */
      if (result != VK_SUCCESS)
         return result;
   }
}

static VkResult
anv_timelines_wait(struct anv_device *device,
                   struct anv_timeline **timelines,
                   const uint64_t *serials,
                   uint32_t n_timelines,
                   bool wait_all,
                   uint64_t abs_timeout_ns)
{
   if (!wait_all && n_timelines > 1) {
      while (1) {
         VkResult result;
         pthread_mutex_lock(&device->mutex);
         for (uint32_t i = 0; i < n_timelines; i++) {
            result =
               anv_timeline_wait_locked(device, timelines[i], serials[i], 0);
            if (result != VK_TIMEOUT)
               break;
         }

         if (result != VK_TIMEOUT ||
             anv_gettime_ns() >= abs_timeout_ns) {
            pthread_mutex_unlock(&device->mutex);
            return result;
         }

         /* If none of them are ready do a short wait so we don't completely
          * spin while holding the lock.  Wait for a tenth of the remaining
          * time, capped at 10us; the 10us is completely arbitrary.
          */
         uint64_t abs_short_wait_ns =
            anv_get_absolute_timeout(
               MIN2((abs_timeout_ns - anv_gettime_ns()) / 10, 10 * 1000));
         struct timespec abstime = {
            .tv_sec = abs_short_wait_ns / NSEC_PER_SEC,
            .tv_nsec = abs_short_wait_ns % NSEC_PER_SEC,
         };
         ASSERTED int ret;
         ret = pthread_cond_timedwait(&device->queue_submit,
                                      &device->mutex, &abstime);
         assert(ret != EINVAL);
      }
   } else {
      VkResult result = VK_SUCCESS;
      pthread_mutex_lock(&device->mutex);
      for (uint32_t i = 0; i < n_timelines; i++) {
         result =
            anv_timeline_wait_locked(device, timelines[i],
                                     serials[i], abs_timeout_ns);
         if (result != VK_SUCCESS)
            break;
      }
      pthread_mutex_unlock(&device->mutex);
      return result;
   }
}

VkResult anv_WaitSemaphoresKHR(
    VkDevice                                    _device,
    const VkSemaphoreWaitInfoKHR*               pWaitInfo,
    uint64_t                                    timeout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   struct anv_timeline **timelines =
      vk_alloc(&device->alloc,
               pWaitInfo->semaphoreCount * sizeof(*timelines),
               8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!timelines)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   uint64_t *values = vk_alloc(&device->alloc,
                               pWaitInfo->semaphoreCount * sizeof(*values),
                               8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!values) {
      vk_free(&device->alloc, timelines);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   uint32_t handle_count = 0;
   for (uint32_t i = 0; i < pWaitInfo->semaphoreCount; i++) {
      ANV_FROM_HANDLE(anv_semaphore, semaphore, pWaitInfo->pSemaphores[i]);
      struct anv_semaphore_impl *impl =
         semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
         &semaphore->temporary : &semaphore->permanent;

      assert(impl->type == ANV_SEMAPHORE_TYPE_TIMELINE);

      /* A wait for value 0 is always satisfied; skip it. */
      if (pWaitInfo->pValues[i] == 0)
         continue;

      timelines[handle_count] = &impl->timeline;
      values[handle_count] = pWaitInfo->pValues[i];
      handle_count++;
   }

   VkResult result = VK_SUCCESS;
   if (handle_count > 0) {
      /* anv_timelines_wait() takes an absolute timeout while the timeout we
       * got from the application is relative to now.
       */
      result = anv_timelines_wait(device, timelines, values, handle_count,
                                  !(pWaitInfo->flags & VK_SEMAPHORE_WAIT_ANY_BIT_KHR),
                                  anv_get_absolute_timeout(timeout));
   }

   vk_free(&device->alloc, timelines);
   vk_free(&device->alloc, values);

   return result;
}
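
/* Illustrative application-side sketch (not driver code): a host wait on a
 * timeline point maps onto anv_WaitSemaphoresKHR() above:
 *
 *    uint64_t wait_value = 42;
 *    VkSemaphoreWaitInfoKHR wait_info = {
 *       .sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO_KHR,
 *       .semaphoreCount = 1,
 *       .pSemaphores = &timeline_semaphore,
 *       .pValues = &wait_value,
 *    };
 *    // The timeout is relative, in nanoseconds; here 1 second.
 *    VkResult res = vkWaitSemaphoresKHR(device, &wait_info, 1000000000ull);
 */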

VkResult anv_SignalSemaphoreKHR(
    VkDevice                                    _device,
    const VkSemaphoreSignalInfoKHR*             pSignalInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pSignalInfo->semaphore);

   struct anv_semaphore_impl *impl =
      semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
      &semaphore->temporary : &semaphore->permanent;

   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_TIMELINE: {
      pthread_mutex_lock(&device->mutex);

      VkResult result = anv_timeline_gc_locked(device, &impl->timeline);

      assert(pSignalInfo->value > impl->timeline.highest_pending);

      impl->timeline.highest_pending = impl->timeline.highest_past = pSignalInfo->value;

      if (result == VK_SUCCESS)
         result = anv_device_submit_deferred_locked(device);

      pthread_cond_broadcast(&device->queue_submit);
      pthread_mutex_unlock(&device->mutex);
      return result;
   }

   default:
      unreachable("Invalid semaphore type");
   }
}
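
/* Illustrative application-side sketch (not driver code): host signaling,
 * handled by anv_SignalSemaphoreKHR() above, lets the CPU release GPU work
 * or host waiters that depend on a timeline point:
 *
 *    VkSemaphoreSignalInfoKHR signal_info = {
 *       .sType = VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO_KHR,
 *       .semaphore = timeline_semaphore,
 *       .value = 43,  // must exceed the current highest pending value
 *    };
 *    vkSignalSemaphoreKHR(device, &signal_info);
 */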