/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * This file implements VkQueue, VkFence, and VkSemaphore
 */

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

#include "util/os_file.h"

#include "anv_private.h"
#include "vk_util.h"

#include "genxml/gen7_pack.h"

uint64_t anv_gettime_ns(void)
{
   struct timespec current;
   clock_gettime(CLOCK_MONOTONIC, &current);
   return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
}

uint64_t anv_get_absolute_timeout(uint64_t timeout)
{
   if (timeout == 0)
      return 0;
   uint64_t current_time = anv_gettime_ns();
   uint64_t max_timeout = (uint64_t) INT64_MAX - current_time;

   timeout = MIN2(max_timeout, timeout);

   return (current_time + timeout);
}

static int64_t anv_get_relative_timeout(uint64_t abs_timeout)
{
   uint64_t now = anv_gettime_ns();

   /* We don't want negative timeouts.
    *
    * DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is
    * supposed to block indefinitely for timeouts < 0. Unfortunately,
    * this was broken for a couple of kernel releases. Since there's
    * no way to know whether or not the kernel we're using is one of
    * the broken ones, the best we can do is to clamp the timeout to
    * INT64_MAX. This limits the maximum timeout from 584 years to
    * 292 years - likely not a big deal.
    */
   if (abs_timeout < now)
      return 0;

   uint64_t rel_timeout = abs_timeout - now;
   if (rel_timeout > (uint64_t) INT64_MAX)
      rel_timeout = INT64_MAX;

   return rel_timeout;
}
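
/* Worked example (hypothetical numbers): with now = 1000 and abs_timeout =
 * UINT64_MAX, abs_timeout - now still exceeds INT64_MAX, so the result is
 * clamped to INT64_MAX (~292 years) before it reaches the kernel's signed
 * 64-bit timeout field.
 */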

static struct anv_semaphore *anv_semaphore_ref(struct anv_semaphore *semaphore);
static void anv_semaphore_unref(struct anv_device *device, struct anv_semaphore *semaphore);
static void anv_semaphore_impl_cleanup(struct anv_device *device,
                                       struct anv_semaphore_impl *impl);

static void
anv_queue_submit_free(struct anv_device *device,
                      struct anv_queue_submit *submit)
{
   const VkAllocationCallbacks *alloc = submit->alloc;

   for (uint32_t i = 0; i < submit->temporary_semaphore_count; i++)
      anv_semaphore_impl_cleanup(device, &submit->temporary_semaphores[i]);
   for (uint32_t i = 0; i < submit->sync_fd_semaphore_count; i++)
      anv_semaphore_unref(device, submit->sync_fd_semaphores[i]);
   /* Execbuf does not consume the in_fence. It's our job to close it. */
   if (submit->in_fence != -1)
      close(submit->in_fence);
   if (submit->out_fence != -1)
      close(submit->out_fence);
   vk_free(alloc, submit->fences);
   vk_free(alloc, submit->temporary_semaphores);
   vk_free(alloc, submit->wait_timelines);
   vk_free(alloc, submit->wait_timeline_values);
   vk_free(alloc, submit->signal_timelines);
   vk_free(alloc, submit->signal_timeline_values);
   vk_free(alloc, submit->fence_bos);
   vk_free(alloc, submit);
}

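/* A queued submission is ready to execute once every timeline point it waits
 * on has at least been submitted, i.e. each wait value is <= the
 * corresponding timeline's highest_pending serial.
 */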
static bool
anv_queue_submit_ready_locked(struct anv_queue_submit *submit)
{
   for (uint32_t i = 0; i < submit->wait_timeline_count; i++) {
      if (submit->wait_timeline_values[i] > submit->wait_timelines[i]->highest_pending)
         return false;
   }

   return true;
}

static VkResult
anv_timeline_init(struct anv_device *device,
                  struct anv_timeline *timeline,
                  uint64_t initial_value)
{
   timeline->highest_past =
      timeline->highest_pending = initial_value;
   list_inithead(&timeline->points);
   list_inithead(&timeline->free_points);

   return VK_SUCCESS;
}

static void
anv_timeline_finish(struct anv_device *device,
                    struct anv_timeline *timeline)
{
   list_for_each_entry_safe(struct anv_timeline_point, point,
                            &timeline->free_points, link) {
      list_del(&point->link);
      anv_device_release_bo(device, point->bo);
      vk_free(&device->vk.alloc, point);
   }
   list_for_each_entry_safe(struct anv_timeline_point, point,
                            &timeline->points, link) {
      list_del(&point->link);
      anv_device_release_bo(device, point->bo);
      vk_free(&device->vk.alloc, point);
   }
}

static VkResult
anv_timeline_add_point_locked(struct anv_device *device,
                              struct anv_timeline *timeline,
                              uint64_t value,
                              struct anv_timeline_point **point)
{
   VkResult result = VK_SUCCESS;

   if (list_is_empty(&timeline->free_points)) {
      *point =
         vk_zalloc(&device->vk.alloc, sizeof(**point),
                   8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (!(*point))
         result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      if (result == VK_SUCCESS) {
         result = anv_device_alloc_bo(device, 4096,
                                      ANV_BO_ALLOC_EXTERNAL |
                                      ANV_BO_ALLOC_IMPLICIT_SYNC,
                                      0 /* explicit_address */,
                                      &(*point)->bo);
         if (result != VK_SUCCESS)
            vk_free(&device->vk.alloc, *point);
      }
   } else {
      *point = list_first_entry(&timeline->free_points,
                                struct anv_timeline_point, link);
      list_del(&(*point)->link);
   }

   if (result == VK_SUCCESS) {
      (*point)->serial = value;
      list_addtail(&(*point)->link, &timeline->points);
   }

   return result;
}

static VkResult
anv_timeline_gc_locked(struct anv_device *device,
                       struct anv_timeline *timeline)
{
   list_for_each_entry_safe(struct anv_timeline_point, point,
                            &timeline->points, link) {
      /* timeline->highest_pending is only incremented once submission has
       * happened. If this point has a greater serial, it means the point
       * hasn't been submitted yet.
       */
      if (point->serial > timeline->highest_pending)
         return VK_SUCCESS;

      /* If someone is waiting on this time point, consider it busy and don't
       * try to recycle it. There's a slim possibility that it's no longer
       * busy by the time we look at it but we would be recycling it out from
       * under a waiter and that can lead to weird races.
       *
       * We walk the list in order, so if this time point is still busy, so
       * is every following time point.
       */
      assert(point->waiting >= 0);
      if (point->waiting)
         return VK_SUCCESS;

      /* Garbage collect any signaled point. */
      VkResult result = anv_device_bo_busy(device, point->bo);
      if (result == VK_NOT_READY) {
         /* We walk the list in order, so if this time point is still busy,
          * so is every following time point.
          */
         return VK_SUCCESS;
      } else if (result != VK_SUCCESS) {
         return result;
      }

      assert(timeline->highest_past < point->serial);
      timeline->highest_past = point->serial;

      list_del(&point->link);
      list_add(&point->link, &timeline->free_points);
   }

   return VK_SUCCESS;
}
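
/* Timeline point lifecycle, as implemented above: anv_timeline_add_point_locked()
 * takes a point from free_points (or allocates a fresh one backed by a 4k BO)
 * and appends it to points with serial == value; anv_timeline_gc_locked()
 * moves points whose BO has gone idle back onto free_points and advances
 * highest_past. Points are only truly freed in anv_timeline_finish().
 */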

static VkResult anv_queue_submit_add_fence_bo(struct anv_queue_submit *submit,
                                              struct anv_bo *bo,
                                              bool signal);

static VkResult
anv_queue_submit_timeline_locked(struct anv_queue *queue,
                                 struct anv_queue_submit *submit)
{
   VkResult result;

   for (uint32_t i = 0; i < submit->wait_timeline_count; i++) {
      struct anv_timeline *timeline = submit->wait_timelines[i];
      uint64_t wait_value = submit->wait_timeline_values[i];

      if (timeline->highest_past >= wait_value)
         continue;

      list_for_each_entry(struct anv_timeline_point, point, &timeline->points, link) {
         if (point->serial < wait_value)
            continue;
         result = anv_queue_submit_add_fence_bo(submit, point->bo, false);
         if (result != VK_SUCCESS)
            return result;
         break;
      }
   }
   for (uint32_t i = 0; i < submit->signal_timeline_count; i++) {
      struct anv_timeline *timeline = submit->signal_timelines[i];
      uint64_t signal_value = submit->signal_timeline_values[i];
      struct anv_timeline_point *point;

      result = anv_timeline_add_point_locked(queue->device, timeline,
                                             signal_value, &point);
      if (result != VK_SUCCESS)
         return result;

      result = anv_queue_submit_add_fence_bo(submit, point->bo, true);
      if (result != VK_SUCCESS)
         return result;
   }

   result = anv_queue_execbuf_locked(queue, submit);

   if (result == VK_SUCCESS) {
      /* Update the pending values in the timeline objects. */
      for (uint32_t i = 0; i < submit->signal_timeline_count; i++) {
         struct anv_timeline *timeline = submit->signal_timelines[i];
         uint64_t signal_value = submit->signal_timeline_values[i];

         assert(signal_value > timeline->highest_pending);
         timeline->highest_pending = signal_value;
      }

      /* Update signaled semaphores backed by syncfd. */
      for (uint32_t i = 0; i < submit->sync_fd_semaphore_count; i++) {
         struct anv_semaphore *semaphore = submit->sync_fd_semaphores[i];
         /* Out fences can't have temporary state because that would imply
          * that we imported a sync file and are trying to signal it.
          */
         assert(semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE);
         struct anv_semaphore_impl *impl = &semaphore->permanent;

         assert(impl->type == ANV_SEMAPHORE_TYPE_SYNC_FILE);
         impl->fd = os_dupfd_cloexec(submit->out_fence);
      }
   } else {
      /* Unblock any waiters by signaling the points; the application will
       * get a device-lost error code.
       */
      for (uint32_t i = 0; i < submit->signal_timeline_count; i++) {
         struct anv_timeline *timeline = submit->signal_timelines[i];
         uint64_t signal_value = submit->signal_timeline_values[i];

         assert(signal_value > timeline->highest_pending);
         timeline->highest_past = timeline->highest_pending = signal_value;
      }
   }

   return result;
}

static VkResult
anv_queue_submit_deferred_locked(struct anv_queue *queue, uint32_t *advance)
{
   VkResult result = VK_SUCCESS;

   /* Go through all the queued submissions and submit them until we find
    * one that's waiting on a point that hasn't materialized yet.
    */
   list_for_each_entry_safe(struct anv_queue_submit, submit,
                            &queue->queued_submits, link) {
      if (!anv_queue_submit_ready_locked(submit))
         break;

      (*advance)++;
      list_del(&submit->link);

      result = anv_queue_submit_timeline_locked(queue, submit);

      anv_queue_submit_free(queue->device, submit);

      if (result != VK_SUCCESS)
         break;
   }

   return result;
}

static VkResult
anv_device_submit_deferred_locked(struct anv_device *device)
{
   uint32_t advance = 0;
   return anv_queue_submit_deferred_locked(&device->queue, &advance);
}

static VkResult
_anv_queue_submit(struct anv_queue *queue, struct anv_queue_submit **_submit,
                  bool flush_queue)
{
   struct anv_queue_submit *submit = *_submit;

   /* Wait before signal behavior means we might keep alive the
    * anv_queue_submit object a bit longer, so transfer the ownership to the
    * anv_queue.
    */
   *_submit = NULL;

   pthread_mutex_lock(&queue->device->mutex);
   list_addtail(&submit->link, &queue->queued_submits);
   VkResult result = anv_device_submit_deferred_locked(queue->device);
   if (flush_queue) {
      while (result == VK_SUCCESS && !list_is_empty(&queue->queued_submits)) {
         int ret = pthread_cond_wait(&queue->device->queue_submit,
                                     &queue->device->mutex);
         if (ret != 0) {
            result = anv_device_set_lost(queue->device, "wait timeout");
            break;
         }

         result = anv_device_submit_deferred_locked(queue->device);
      }
   }
   pthread_mutex_unlock(&queue->device->mutex);
   return result;
}

VkResult
anv_queue_init(struct anv_device *device, struct anv_queue *queue)
{
   vk_object_base_init(&device->vk, &queue->base, VK_OBJECT_TYPE_QUEUE);
   queue->device = device;
   queue->flags = 0;

   list_inithead(&queue->queued_submits);

   return VK_SUCCESS;
}

void
anv_queue_finish(struct anv_queue *queue)
{
   vk_object_base_finish(&queue->base);
}

static VkResult
anv_queue_submit_add_fence_bo(struct anv_queue_submit *submit,
                              struct anv_bo *bo,
                              bool signal)
{
   if (submit->fence_bo_count >= submit->fence_bo_array_length) {
      uint32_t new_len = MAX2(submit->fence_bo_array_length * 2, 64);

      submit->fence_bos =
         vk_realloc(submit->alloc,
                    submit->fence_bos, new_len * sizeof(*submit->fence_bos),
                    8, submit->alloc_scope);
      if (submit->fence_bos == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      submit->fence_bo_array_length = new_len;
   }

   /* Take advantage of the fact that anv_bo is allocated with an 8-byte
    * alignment, so we can use the lowest bit to store whether this BO needs
    * to be signaled.
    */
   submit->fence_bos[submit->fence_bo_count++] = anv_pack_ptr(bo, 1, signal);

   return VK_SUCCESS;
}
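
/* A minimal sketch of the pointer packing assumed above (the actual
 * anv_pack_ptr()/anv_unpack_ptr() helpers are part of the driver's private
 * headers):
 *
 *    uintptr_t packed = (uintptr_t)bo | (signal ? 1 : 0);
 *    struct anv_bo *unpacked = (struct anv_bo *)(packed & ~(uintptr_t)1);
 *    bool needs_signal = packed & 1;
 *
 * This only works because an 8-byte-aligned pointer always has its low
 * three bits clear.
 */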

static VkResult
anv_queue_submit_add_syncobj(struct anv_queue_submit* submit,
                             struct anv_device *device,
                             uint32_t handle, uint32_t flags)
{
   assert(flags != 0);

   if (submit->fence_count >= submit->fence_array_length) {
      uint32_t new_len = MAX2(submit->fence_array_length * 2, 64);

      submit->fences =
         vk_realloc(submit->alloc,
                    submit->fences, new_len * sizeof(*submit->fences),
                    8, submit->alloc_scope);
      if (submit->fences == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      submit->fence_array_length = new_len;
   }

   submit->fences[submit->fence_count++] = (struct drm_i915_gem_exec_fence) {
      .handle = handle,
      .flags = flags,
   };

   return VK_SUCCESS;
}

static VkResult
anv_queue_submit_add_sync_fd_fence(struct anv_queue_submit *submit,
                                   struct anv_semaphore *semaphore)
{
   if (submit->sync_fd_semaphore_count >= submit->sync_fd_semaphore_array_length) {
      uint32_t new_len = MAX2(submit->sync_fd_semaphore_array_length * 2, 64);
      struct anv_semaphore **new_semaphores =
         vk_realloc(submit->alloc, submit->sync_fd_semaphores,
                    new_len * sizeof(*submit->sync_fd_semaphores), 8,
                    submit->alloc_scope);
      if (new_semaphores == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      submit->sync_fd_semaphores = new_semaphores;
   }

   submit->sync_fd_semaphores[submit->sync_fd_semaphore_count++] =
      anv_semaphore_ref(semaphore);
   submit->need_out_fence = true;

   return VK_SUCCESS;
}

static VkResult
anv_queue_submit_add_timeline_wait(struct anv_queue_submit* submit,
                                   struct anv_device *device,
                                   struct anv_timeline *timeline,
                                   uint64_t value)
{
   if (submit->wait_timeline_count >= submit->wait_timeline_array_length) {
      uint32_t new_len = MAX2(submit->wait_timeline_array_length * 2, 64);

      submit->wait_timelines =
         vk_realloc(submit->alloc,
                    submit->wait_timelines, new_len * sizeof(*submit->wait_timelines),
                    8, submit->alloc_scope);
      if (submit->wait_timelines == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      submit->wait_timeline_values =
         vk_realloc(submit->alloc,
                    submit->wait_timeline_values, new_len * sizeof(*submit->wait_timeline_values),
                    8, submit->alloc_scope);
      if (submit->wait_timeline_values == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      submit->wait_timeline_array_length = new_len;
   }

   submit->wait_timelines[submit->wait_timeline_count] = timeline;
   submit->wait_timeline_values[submit->wait_timeline_count] = value;

   submit->wait_timeline_count++;

   return VK_SUCCESS;
}

static VkResult
anv_queue_submit_add_timeline_signal(struct anv_queue_submit* submit,
                                     struct anv_device *device,
                                     struct anv_timeline *timeline,
                                     uint64_t value)
{
   assert(timeline->highest_pending < value);

   if (submit->signal_timeline_count >= submit->signal_timeline_array_length) {
      uint32_t new_len = MAX2(submit->signal_timeline_array_length * 2, 64);

      submit->signal_timelines =
         vk_realloc(submit->alloc,
                    submit->signal_timelines, new_len * sizeof(*submit->signal_timelines),
                    8, submit->alloc_scope);
      if (submit->signal_timelines == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      submit->signal_timeline_values =
         vk_realloc(submit->alloc,
                    submit->signal_timeline_values, new_len * sizeof(*submit->signal_timeline_values),
                    8, submit->alloc_scope);
      if (submit->signal_timeline_values == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      submit->signal_timeline_array_length = new_len;
   }

   submit->signal_timelines[submit->signal_timeline_count] = timeline;
   submit->signal_timeline_values[submit->signal_timeline_count] = value;

   submit->signal_timeline_count++;

   return VK_SUCCESS;
}

static struct anv_queue_submit *
anv_queue_submit_alloc(struct anv_device *device, int perf_query_pass)
{
   const VkAllocationCallbacks *alloc = &device->vk.alloc;
   VkSystemAllocationScope alloc_scope = VK_SYSTEM_ALLOCATION_SCOPE_DEVICE;

   struct anv_queue_submit *submit = vk_zalloc(alloc, sizeof(*submit), 8, alloc_scope);
   if (!submit)
      return NULL;

   submit->alloc = alloc;
   submit->alloc_scope = alloc_scope;
   submit->in_fence = -1;
   submit->out_fence = -1;
   submit->perf_query_pass = perf_query_pass;

   return submit;
}

VkResult
anv_queue_submit_simple_batch(struct anv_queue *queue,
                              struct anv_batch *batch)
{
   if (queue->device->no_hw)
      return VK_SUCCESS;

   struct anv_device *device = queue->device;
   struct anv_queue_submit *submit = anv_queue_submit_alloc(device, -1);
   if (!submit)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   bool has_syncobj_wait = device->physical->has_syncobj_wait;
   VkResult result;
   uint32_t syncobj;
   struct anv_bo *batch_bo, *sync_bo;

   if (has_syncobj_wait) {
      syncobj = anv_gem_syncobj_create(device, 0);
      if (!syncobj) {
         result = vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
         goto err_free_submit;
      }

      result = anv_queue_submit_add_syncobj(submit, device, syncobj,
                                            I915_EXEC_FENCE_SIGNAL);
   } else {
      result = anv_device_alloc_bo(device, 4096,
                                   ANV_BO_ALLOC_EXTERNAL |
                                   ANV_BO_ALLOC_IMPLICIT_SYNC,
                                   0 /* explicit_address */,
                                   &sync_bo);
      if (result != VK_SUCCESS)
         goto err_free_submit;

      result = anv_queue_submit_add_fence_bo(submit, sync_bo, true /* signal */);
   }

   if (result != VK_SUCCESS)
      goto err_destroy_sync_primitive;

   if (batch) {
      uint32_t size = align_u32(batch->next - batch->start, 8);
      result = anv_bo_pool_alloc(&device->batch_bo_pool, size, &batch_bo);
      if (result != VK_SUCCESS)
         goto err_destroy_sync_primitive;

      memcpy(batch_bo->map, batch->start, size);
      if (!device->info.has_llc)
         gen_flush_range(batch_bo->map, size);

      submit->simple_bo = batch_bo;
      submit->simple_bo_size = size;
   }

   result = _anv_queue_submit(queue, &submit, true);

   if (result == VK_SUCCESS) {
      if (has_syncobj_wait) {
         if (anv_gem_syncobj_wait(device, &syncobj, 1,
                                  anv_get_absolute_timeout(INT64_MAX), true))
            result = anv_device_set_lost(device, "anv_gem_syncobj_wait failed: %m");
         anv_gem_syncobj_destroy(device, syncobj);
      } else {
         result = anv_device_wait(device, sync_bo,
                                  anv_get_relative_timeout(INT64_MAX));
         anv_device_release_bo(device, sync_bo);
      }
   }

   if (batch)
      anv_bo_pool_free(&device->batch_bo_pool, batch_bo);

   if (submit)
      anv_queue_submit_free(device, submit);

   return result;

err_destroy_sync_primitive:
   if (has_syncobj_wait)
      anv_gem_syncobj_destroy(device, syncobj);
   else
      anv_device_release_bo(device, sync_bo);
err_free_submit:
   if (submit)
      anv_queue_submit_free(device, submit);

   return result;
}
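
/* Note on usage: anv_QueueWaitIdle() below calls this with batch == NULL, so
 * only the sync primitive is submitted (with flush_queue == true, which
 * drains the deferred-submit list first); waiting on that primitive then,
 * given in-order execution on the queue, covers all previously submitted
 * work.
 */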

/* Transfer ownership of temporary semaphores from the VkSemaphore object to
 * the anv_queue_submit object. Those temporary semaphores are then freed in
 * anv_queue_submit_free() once the driver is finished with them.
 */
static VkResult
maybe_transfer_temporary_semaphore(struct anv_queue_submit *submit,
                                   struct anv_semaphore *semaphore,
                                   struct anv_semaphore_impl **out_impl)
{
   struct anv_semaphore_impl *impl = &semaphore->temporary;

   if (impl->type == ANV_SEMAPHORE_TYPE_NONE) {
      *out_impl = &semaphore->permanent;
      return VK_SUCCESS;
   }

   /* BO backed timeline semaphores cannot be temporary. */
   assert(impl->type != ANV_SEMAPHORE_TYPE_TIMELINE);

   /*
    * There is a requirement to reset semaphores to their permanent state
    * after submission. From the Vulkan 1.0.53 spec:
    *
    *    "If the import is temporary, the implementation must restore the
    *    semaphore to its prior permanent state after submitting the next
    *    semaphore wait operation."
    *
    * In the case where we defer the actual submission to a thread because of
    * the wait-before-submit behavior required for timeline semaphores, we
    * need to make copies of the temporary syncobj to ensure they stay alive
    * until we do the actual execbuffer ioctl.
    */
   if (submit->temporary_semaphore_count >= submit->temporary_semaphore_array_length) {
      uint32_t new_len = MAX2(submit->temporary_semaphore_array_length * 2, 8);
      /* Make sure that if the realloc fails, we still have the old semaphore
       * array around to properly clean things up on failure.
       */
      struct anv_semaphore_impl *new_array =
         vk_realloc(submit->alloc,
                    submit->temporary_semaphores,
                    new_len * sizeof(*submit->temporary_semaphores),
                    8, submit->alloc_scope);
      if (new_array == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      submit->temporary_semaphores = new_array;
      submit->temporary_semaphore_array_length = new_len;
   }

   /* Copy anv_semaphore_impl into anv_queue_submit. */
   submit->temporary_semaphores[submit->temporary_semaphore_count++] = *impl;
   *out_impl = &submit->temporary_semaphores[submit->temporary_semaphore_count - 1];

   /* Clear the incoming semaphore */
   impl->type = ANV_SEMAPHORE_TYPE_NONE;

   return VK_SUCCESS;
}
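
/* Sketch of the lifecycle this implements: an application import with
 * VK_SEMAPHORE_IMPORT_TEMPORARY_BIT lands in semaphore->temporary; the next
 * submission that waits on the semaphore copies that impl into the
 * anv_queue_submit here and resets semaphore->temporary to
 * ANV_SEMAPHORE_TYPE_NONE, so subsequent submits see semaphore->permanent
 * again.
 */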

static VkResult
anv_queue_submit(struct anv_queue *queue,
                 struct anv_cmd_buffer *cmd_buffer,
                 const VkSemaphore *in_semaphores,
                 const uint64_t *in_values,
                 uint32_t num_in_semaphores,
                 const VkSemaphore *out_semaphores,
                 const uint64_t *out_values,
                 uint32_t num_out_semaphores,
                 struct anv_bo *wsi_signal_bo,
                 VkFence _fence,
                 int perf_query_pass)
{
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   struct anv_device *device = queue->device;
   UNUSED struct anv_physical_device *pdevice = device->physical;
   struct anv_queue_submit *submit = anv_queue_submit_alloc(device, perf_query_pass);
   if (!submit)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   submit->cmd_buffer = cmd_buffer;

   VkResult result = VK_SUCCESS;

   for (uint32_t i = 0; i < num_in_semaphores; i++) {
      ANV_FROM_HANDLE(anv_semaphore, semaphore, in_semaphores[i]);
      struct anv_semaphore_impl *impl;

      result = maybe_transfer_temporary_semaphore(submit, semaphore, &impl);
      if (result != VK_SUCCESS)
         goto error;

      switch (impl->type) {
      case ANV_SEMAPHORE_TYPE_BO:
         assert(!pdevice->has_syncobj);
         result = anv_queue_submit_add_fence_bo(submit, impl->bo, false /* signal */);
         if (result != VK_SUCCESS)
            goto error;
         break;

      case ANV_SEMAPHORE_TYPE_WSI_BO:
         /* When using a window-system buffer as a semaphore, always enable
          * EXEC_OBJECT_WRITE. This gives us a WaR hazard with the display or
          * compositor's read of the buffer and enforces that we don't start
          * rendering until they are finished. This is exactly the
          * synchronization we want with vkAcquireNextImage.
          */
         result = anv_queue_submit_add_fence_bo(submit, impl->bo, true /* signal */);
         if (result != VK_SUCCESS)
            goto error;
         break;

      case ANV_SEMAPHORE_TYPE_SYNC_FILE:
         assert(!pdevice->has_syncobj);
         if (submit->in_fence == -1) {
            submit->in_fence = impl->fd;
            if (submit->in_fence == -1) {
               result = vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
               goto error;
            }
            impl->fd = -1;
         } else {
            int merge = anv_gem_sync_file_merge(device, submit->in_fence, impl->fd);
            if (merge == -1) {
               result = vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
               goto error;
            }
            close(impl->fd);
            close(submit->in_fence);
            impl->fd = -1;
            submit->in_fence = merge;
         }
         break;

      case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ: {
         result = anv_queue_submit_add_syncobj(submit, device,
                                               impl->syncobj,
                                               I915_EXEC_FENCE_WAIT);
         if (result != VK_SUCCESS)
            goto error;
         break;
      }

      case ANV_SEMAPHORE_TYPE_TIMELINE:
         result = anv_queue_submit_add_timeline_wait(submit, device,
                                                     &impl->timeline,
                                                     in_values ? in_values[i] : 0);
         if (result != VK_SUCCESS)
            goto error;
         break;

      default:
         break;
      }
   }

   for (uint32_t i = 0; i < num_out_semaphores; i++) {
      ANV_FROM_HANDLE(anv_semaphore, semaphore, out_semaphores[i]);

      /* Under most circumstances, out fences won't be temporary. However,
       * the spec does allow it for opaque_fd. From the Vulkan 1.0.53 spec:
       *
       *    "If the import is temporary, the implementation must restore the
       *    semaphore to its prior permanent state after submitting the next
       *    semaphore wait operation."
       *
       * The spec says nothing whatsoever about signal operations on
       * temporarily imported semaphores so it appears they are allowed.
       * There are also CTS tests that require this to work.
       */
      struct anv_semaphore_impl *impl =
         semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
         &semaphore->temporary : &semaphore->permanent;

      switch (impl->type) {
      case ANV_SEMAPHORE_TYPE_BO:
         assert(!pdevice->has_syncobj);
         result = anv_queue_submit_add_fence_bo(submit, impl->bo, true /* signal */);
         if (result != VK_SUCCESS)
            goto error;
         break;

      case ANV_SEMAPHORE_TYPE_SYNC_FILE:
         assert(!pdevice->has_syncobj);
         result = anv_queue_submit_add_sync_fd_fence(submit, semaphore);
         if (result != VK_SUCCESS)
            goto error;
         break;

      case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ: {
         result = anv_queue_submit_add_syncobj(submit, device, impl->syncobj,
                                               I915_EXEC_FENCE_SIGNAL);
         if (result != VK_SUCCESS)
            goto error;
         break;
      }

      case ANV_SEMAPHORE_TYPE_TIMELINE:
         result = anv_queue_submit_add_timeline_signal(submit, device,
                                                       &impl->timeline,
                                                       out_values ? out_values[i] : 0);
         if (result != VK_SUCCESS)
            goto error;
         break;

      default:
         break;
      }
   }

   if (wsi_signal_bo) {
      result = anv_queue_submit_add_fence_bo(submit, wsi_signal_bo, true /* signal */);
      if (result != VK_SUCCESS)
         goto error;
   }

   if (fence) {
      /* Under most circumstances, out fences won't be temporary. However,
       * the spec does allow it for opaque_fd. From the Vulkan 1.0.53 spec:
       *
       *    "If the import is temporary, the implementation must restore the
       *    semaphore to its prior permanent state after submitting the next
       *    semaphore wait operation."
       *
       * The spec says nothing whatsoever about signal operations on
       * temporarily imported semaphores so it appears they are allowed.
       * There are also CTS tests that require this to work.
       */
      struct anv_fence_impl *impl =
         fence->temporary.type != ANV_FENCE_TYPE_NONE ?
         &fence->temporary : &fence->permanent;

      switch (impl->type) {
      case ANV_FENCE_TYPE_BO:
         result = anv_queue_submit_add_fence_bo(submit, impl->bo.bo, true /* signal */);
         if (result != VK_SUCCESS)
            goto error;
         break;

      case ANV_FENCE_TYPE_SYNCOBJ: {
         /*
          * For the same reason we reset the signaled binary syncobj above,
          * also reset the fence's syncobj so that they don't contain a
          * signaled dma-fence.
          */
         result = anv_queue_submit_add_syncobj(submit, device, impl->syncobj,
                                               I915_EXEC_FENCE_SIGNAL);
         if (result != VK_SUCCESS)
            goto error;
         break;
      }

      default:
         unreachable("Invalid fence type");
      }
   }

   result = _anv_queue_submit(queue, &submit, false);
   if (result != VK_SUCCESS)
      goto error;

   if (fence && fence->permanent.type == ANV_FENCE_TYPE_BO) {
      /* If we have a permanent BO fence, the only possible temporary type
       * would be WSI_BO (because BO fences are not shareable). The Vulkan
       * spec also requires that the fence passed to vkQueueSubmit() be:
       *
       *  * unsignaled
       *  * not be associated with any other queue command that has not yet
       *    completed execution on that queue
       *
       * So the only acceptable type for the temporary is NONE.
       */
      assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);

      /* Once the execbuf has returned, we need to set the fence state to
       * SUBMITTED. We can't do this before calling execbuf because
       * anv_GetFenceStatus does take the global device lock before checking
       * fence->state.
       *
       * We set the fence state to SUBMITTED regardless of whether or not the
       * execbuf succeeds because we need to ensure that vkWaitForFences() and
       * vkGetFenceStatus() return a valid result (VK_ERROR_DEVICE_LOST or
       * VK_SUCCESS) in a finite amount of time even if execbuf fails.
       */
      fence->permanent.bo.state = ANV_BO_FENCE_STATE_SUBMITTED;
   }

error:
   if (submit)
      anv_queue_submit_free(device, submit);

   return result;
}

VkResult anv_QueueSubmit(
    VkQueue                                     _queue,
    uint32_t                                    submitCount,
    const VkSubmitInfo*                         pSubmits,
    VkFence                                     fence)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);

   if (queue->device->no_hw)
      return VK_SUCCESS;

   /* Query for device status prior to submitting. Technically, we don't need
    * to do this. However, if we have a client that's submitting piles of
    * garbage, we would rather break as early as possible to keep GPU hangs
    * contained. If we don't check here, we'll either be waiting for the
    * kernel to kick us or we'll have to wait until the client waits on a
    * fence before we actually know whether or not we've hung.
    */
   VkResult result = anv_device_query_status(queue->device);
   if (result != VK_SUCCESS)
      return result;

   if (fence && submitCount == 0) {
      /* If we don't have any command buffers, we need to submit a dummy
       * batch to give GEM something to wait on. We could, potentially,
       * come up with something more efficient but this shouldn't be a
       * common case.
       */
      result = anv_queue_submit(queue, NULL, NULL, NULL, 0, NULL, NULL, 0,
                                NULL, fence, -1);
      goto out;
   }

   for (uint32_t i = 0; i < submitCount; i++) {
      /* Fence for this submit. NULL for all but the last one */
      VkFence submit_fence = (i == submitCount - 1) ? fence : VK_NULL_HANDLE;

      const struct wsi_memory_signal_submit_info *mem_signal_info =
         vk_find_struct_const(pSubmits[i].pNext,
                              WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA);
      struct anv_bo *wsi_signal_bo =
         mem_signal_info && mem_signal_info->memory != VK_NULL_HANDLE ?
         anv_device_memory_from_handle(mem_signal_info->memory)->bo : NULL;

      const VkTimelineSemaphoreSubmitInfoKHR *timeline_info =
         vk_find_struct_const(pSubmits[i].pNext,
                              TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR);
      const VkPerformanceQuerySubmitInfoKHR *perf_info =
         vk_find_struct_const(pSubmits[i].pNext,
                              PERFORMANCE_QUERY_SUBMIT_INFO_KHR);
      const uint64_t *wait_values =
         timeline_info && timeline_info->waitSemaphoreValueCount ?
         timeline_info->pWaitSemaphoreValues : NULL;
      const uint64_t *signal_values =
         timeline_info && timeline_info->signalSemaphoreValueCount ?
         timeline_info->pSignalSemaphoreValues : NULL;

      if (pSubmits[i].commandBufferCount == 0) {
         /* If we don't have any command buffers, we need to submit a dummy
          * batch to give GEM something to wait on. We could, potentially,
          * come up with something more efficient but this shouldn't be a
          * common case.
          */
         result = anv_queue_submit(queue, NULL,
                                   pSubmits[i].pWaitSemaphores,
                                   wait_values,
                                   pSubmits[i].waitSemaphoreCount,
                                   pSubmits[i].pSignalSemaphores,
                                   signal_values,
                                   pSubmits[i].signalSemaphoreCount,
                                   wsi_signal_bo,
                                   submit_fence,
                                   -1);
         if (result != VK_SUCCESS)
            goto out;

         continue;
      }

      for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
         ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer,
                         pSubmits[i].pCommandBuffers[j]);
         assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
         assert(!anv_batch_has_error(&cmd_buffer->batch));

         /* Fence for this execbuf. NULL for all but the last one */
         VkFence execbuf_fence =
            (j == pSubmits[i].commandBufferCount - 1) ?
            submit_fence : VK_NULL_HANDLE;

         const VkSemaphore *in_semaphores = NULL, *out_semaphores = NULL;
         const uint64_t *in_values = NULL, *out_values = NULL;
         uint32_t num_in_semaphores = 0, num_out_semaphores = 0;
         if (j == 0) {
            /* Only the first batch gets the in semaphores */
            in_semaphores = pSubmits[i].pWaitSemaphores;
            in_values = wait_values;
            num_in_semaphores = pSubmits[i].waitSemaphoreCount;
         }

         if (j == pSubmits[i].commandBufferCount - 1) {
            /* Only the last batch gets the out semaphores */
            out_semaphores = pSubmits[i].pSignalSemaphores;
            out_values = signal_values;
            num_out_semaphores = pSubmits[i].signalSemaphoreCount;
         }

         result = anv_queue_submit(queue, cmd_buffer,
                                   in_semaphores, in_values, num_in_semaphores,
                                   out_semaphores, out_values, num_out_semaphores,
                                   wsi_signal_bo, execbuf_fence,
                                   perf_info ? perf_info->counterPassIndex : 0);
         if (result != VK_SUCCESS)
            goto out;
      }
   }

out:
   if (result != VK_SUCCESS && result != VK_ERROR_DEVICE_LOST) {
      /* In the case that something has gone wrong we may end up with an
       * inconsistent state from which it may not be trivial to recover.
       * For example, we might have computed address relocations and
       * any future attempt to re-submit this job will need to know about
       * this and avoid computing relocation addresses again.
       *
       * To avoid this sort of issue, we assume that if something was
       * wrong during submission we must already be in a really bad situation
       * anyway (such as being out of memory) and return
       * VK_ERROR_DEVICE_LOST to ensure that clients do not attempt to
       * submit the same job again to this device.
       *
       * We skip doing this on VK_ERROR_DEVICE_LOST because
       * anv_device_set_lost() would have been called already by a callee of
       * anv_queue_submit().
       */
      result = anv_device_set_lost(queue->device, "vkQueueSubmit() failed");
   }

   return result;
}

VkResult anv_QueueWaitIdle(
    VkQueue                                     _queue)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);

   if (anv_device_is_lost(queue->device))
      return VK_ERROR_DEVICE_LOST;

   return anv_queue_submit_simple_batch(queue, NULL);
}

VkResult anv_CreateFence(
    VkDevice                                    _device,
    const VkFenceCreateInfo*                    pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkFence*                                    pFence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_fence *fence;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);

   fence = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*fence), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (fence == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &fence->base, VK_OBJECT_TYPE_FENCE);

   if (device->physical->has_syncobj_wait) {
      fence->permanent.type = ANV_FENCE_TYPE_SYNCOBJ;

      uint32_t create_flags = 0;
      if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT)
         create_flags |= DRM_SYNCOBJ_CREATE_SIGNALED;

      fence->permanent.syncobj = anv_gem_syncobj_create(device, create_flags);
      if (!fence->permanent.syncobj)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   } else {
      fence->permanent.type = ANV_FENCE_TYPE_BO;

      VkResult result = anv_bo_pool_alloc(&device->batch_bo_pool, 4096,
                                          &fence->permanent.bo.bo);
      if (result != VK_SUCCESS)
         return result;

      if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
         fence->permanent.bo.state = ANV_BO_FENCE_STATE_SIGNALED;
      } else {
         fence->permanent.bo.state = ANV_BO_FENCE_STATE_RESET;
      }
   }

   *pFence = anv_fence_to_handle(fence);

   return VK_SUCCESS;
}

static void
anv_fence_impl_cleanup(struct anv_device *device,
                       struct anv_fence_impl *impl)
{
   switch (impl->type) {
   case ANV_FENCE_TYPE_NONE:
      /* Dummy. Nothing to do */
      break;

   case ANV_FENCE_TYPE_BO:
      anv_bo_pool_free(&device->batch_bo_pool, impl->bo.bo);
      break;

   case ANV_FENCE_TYPE_WSI_BO:
      anv_device_release_bo(device, impl->bo.bo);
      break;

   case ANV_FENCE_TYPE_SYNCOBJ:
      anv_gem_syncobj_destroy(device, impl->syncobj);
      break;

   case ANV_FENCE_TYPE_WSI:
      impl->fence_wsi->destroy(impl->fence_wsi);
      break;

   default:
      unreachable("Invalid fence type");
   }

   impl->type = ANV_FENCE_TYPE_NONE;
}

void
anv_fence_reset_temporary(struct anv_device *device,
                          struct anv_fence *fence)
{
   if (fence->temporary.type == ANV_FENCE_TYPE_NONE)
      return;

   anv_fence_impl_cleanup(device, &fence->temporary);
}

void anv_DestroyFence(
    VkDevice                                    _device,
    VkFence                                     _fence,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (!fence)
      return;

   anv_fence_impl_cleanup(device, &fence->temporary);
   anv_fence_impl_cleanup(device, &fence->permanent);

   vk_object_base_finish(&fence->base);
   vk_free2(&device->vk.alloc, pAllocator, fence);
}

VkResult anv_ResetFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);

      /* From the Vulkan 1.0.53 spec:
       *
       *    "If any member of pFences currently has its payload imported with
       *    temporary permanence, that fence’s prior permanent payload is
       *    first restored. The remaining operations described therefore
       *    operate on the restored payload."
       */
      anv_fence_reset_temporary(device, fence);

      struct anv_fence_impl *impl = &fence->permanent;

      switch (impl->type) {
      case ANV_FENCE_TYPE_BO:
         impl->bo.state = ANV_BO_FENCE_STATE_RESET;
         break;

      case ANV_FENCE_TYPE_SYNCOBJ:
         anv_gem_syncobj_reset(device, impl->syncobj);
         break;

      default:
         unreachable("Invalid fence type");
      }
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceStatus(
    VkDevice                                    _device,
    VkFence                                     _fence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   struct anv_fence_impl *impl =
      fence->temporary.type != ANV_FENCE_TYPE_NONE ?
      &fence->temporary : &fence->permanent;

   switch (impl->type) {
   case ANV_FENCE_TYPE_BO:
   case ANV_FENCE_TYPE_WSI_BO:
      switch (impl->bo.state) {
      case ANV_BO_FENCE_STATE_RESET:
         /* If it hasn't even been sent off to the GPU yet, it's not ready */
         return VK_NOT_READY;

      case ANV_BO_FENCE_STATE_SIGNALED:
         /* It's been signaled, return success */
         return VK_SUCCESS;

      case ANV_BO_FENCE_STATE_SUBMITTED: {
         VkResult result = anv_device_bo_busy(device, impl->bo.bo);
         if (result == VK_SUCCESS) {
            impl->bo.state = ANV_BO_FENCE_STATE_SIGNALED;
            return VK_SUCCESS;
         } else {
            return result;
         }
      }
      default:
         unreachable("Invalid fence status");
      }

   case ANV_FENCE_TYPE_SYNCOBJ: {
      int ret = anv_gem_syncobj_wait(device, &impl->syncobj, 1, 0, true);
      if (ret == -1) {
         if (errno == ETIME) {
            return VK_NOT_READY;
         } else {
            /* We don't know the real error. */
            return anv_device_set_lost(device, "drm_syncobj_wait failed: %m");
         }
      } else {
         return VK_SUCCESS;
      }
   }

   default:
      unreachable("Invalid fence type");
   }
}

static VkResult
anv_wait_for_syncobj_fences(struct anv_device *device,
                            uint32_t fenceCount,
                            const VkFence *pFences,
                            bool waitAll,
                            uint64_t abs_timeout_ns)
{
   uint32_t *syncobjs = vk_zalloc(&device->vk.alloc,
                                  sizeof(*syncobjs) * fenceCount, 8,
                                  VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!syncobjs)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      assert(fence->permanent.type == ANV_FENCE_TYPE_SYNCOBJ);

      struct anv_fence_impl *impl =
         fence->temporary.type != ANV_FENCE_TYPE_NONE ?
         &fence->temporary : &fence->permanent;

      assert(impl->type == ANV_FENCE_TYPE_SYNCOBJ);
      syncobjs[i] = impl->syncobj;
   }

   /* The gem_syncobj_wait ioctl may return early due to an inherent
    * limitation in the way it computes timeouts. Loop until we've actually
    * passed the timeout.
    */
   int ret;
   do {
      ret = anv_gem_syncobj_wait(device, syncobjs, fenceCount,
                                 abs_timeout_ns, waitAll);
   } while (ret == -1 && errno == ETIME && anv_gettime_ns() < abs_timeout_ns);

   vk_free(&device->vk.alloc, syncobjs);

   if (ret == -1) {
      if (errno == ETIME) {
         return VK_TIMEOUT;
      } else {
         /* We don't know the real error. */
         return anv_device_set_lost(device, "drm_syncobj_wait failed: %m");
      }
   } else {
      return VK_SUCCESS;
   }
}

static VkResult
anv_wait_for_bo_fences(struct anv_device *device,
                       uint32_t fenceCount,
                       const VkFence *pFences,
                       bool waitAll,
                       uint64_t abs_timeout_ns)
{
   VkResult result = VK_SUCCESS;
   uint32_t pending_fences = fenceCount;
   while (pending_fences) {
      pending_fences = 0;
      bool signaled_fences = false;
      for (uint32_t i = 0; i < fenceCount; i++) {
         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);

         struct anv_fence_impl *impl =
            fence->temporary.type != ANV_FENCE_TYPE_NONE ?
            &fence->temporary : &fence->permanent;
         assert(impl->type == ANV_FENCE_TYPE_BO ||
                impl->type == ANV_FENCE_TYPE_WSI_BO);

         switch (impl->bo.state) {
         case ANV_BO_FENCE_STATE_RESET:
            /* This fence hasn't been submitted yet, we'll catch it the next
             * time around. Yes, this may mean we dead-loop but, short of
             * lots of locking and a condition variable, there's not much
             * that we can do about that.
             */
            pending_fences++;
            continue;

         case ANV_BO_FENCE_STATE_SIGNALED:
            /* This fence is not pending. If waitAll isn't set, we can return
             * early. Otherwise, we have to keep going.
             */
            if (!waitAll) {
               result = VK_SUCCESS;
               goto done;
            }
            continue;

         case ANV_BO_FENCE_STATE_SUBMITTED:
            /* These are the fences we really care about. Go ahead and wait
             * on it until we hit a timeout.
             */
            result = anv_device_wait(device, impl->bo.bo,
                                     anv_get_relative_timeout(abs_timeout_ns));
            switch (result) {
            case VK_SUCCESS:
               impl->bo.state = ANV_BO_FENCE_STATE_SIGNALED;
               signaled_fences = true;
               if (!waitAll)
                  goto done;
               break;

            case VK_TIMEOUT:
               goto done;

            default:
               return result;
            }
         }
      }

      if (pending_fences && !signaled_fences) {
         /* If we've hit this then someone decided to vkWaitForFences before
          * they've actually submitted any of them to a queue. This is a
          * fairly pessimal case, so it's ok to lock here and use a standard
          * pthreads condition variable.
          */
         pthread_mutex_lock(&device->mutex);

         /* It's possible that some of the fences have changed state since
          * the last time we checked. Now that we have the lock, check for
          * pending fences again and don't wait if it's changed.
          */
         uint32_t now_pending_fences = 0;
         for (uint32_t i = 0; i < fenceCount; i++) {
            ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
            if (fence->permanent.bo.state == ANV_BO_FENCE_STATE_RESET)
               now_pending_fences++;
         }
         assert(now_pending_fences <= pending_fences);

         if (now_pending_fences == pending_fences) {
            struct timespec abstime = {
               .tv_sec = abs_timeout_ns / NSEC_PER_SEC,
               .tv_nsec = abs_timeout_ns % NSEC_PER_SEC,
            };

            ASSERTED int ret;
            ret = pthread_cond_timedwait(&device->queue_submit,
                                         &device->mutex, &abstime);
            assert(ret != EINVAL);
            if (anv_gettime_ns() >= abs_timeout_ns) {
               pthread_mutex_unlock(&device->mutex);
               result = VK_TIMEOUT;
               goto done;
            }
         }

         pthread_mutex_unlock(&device->mutex);
      }
   }

done:
   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   return result;
}

static VkResult
anv_wait_for_wsi_fence(struct anv_device *device,
                       struct anv_fence_impl *impl,
                       uint64_t abs_timeout)
{
   return impl->fence_wsi->wait(impl->fence_wsi, abs_timeout);
}

static VkResult
anv_wait_for_fences(struct anv_device *device,
                    uint32_t fenceCount,
                    const VkFence *pFences,
                    bool waitAll,
                    uint64_t abs_timeout)
{
   VkResult result = VK_SUCCESS;

   if (fenceCount <= 1 || waitAll) {
      for (uint32_t i = 0; i < fenceCount; i++) {
         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
         struct anv_fence_impl *impl =
            fence->temporary.type != ANV_FENCE_TYPE_NONE ?
            &fence->temporary : &fence->permanent;

         switch (impl->type) {
         case ANV_FENCE_TYPE_BO:
         case ANV_FENCE_TYPE_WSI_BO:
            result = anv_wait_for_bo_fences(device, 1, &pFences[i],
                                            true, abs_timeout);
            break;
         case ANV_FENCE_TYPE_SYNCOBJ:
            result = anv_wait_for_syncobj_fences(device, 1, &pFences[i],
                                                 true, abs_timeout);
            break;
         case ANV_FENCE_TYPE_WSI:
            result = anv_wait_for_wsi_fence(device, impl, abs_timeout);
            break;
         case ANV_FENCE_TYPE_NONE:
            result = VK_SUCCESS;
            break;
         }
         if (result != VK_SUCCESS)
            return result;
      }
   } else {
      do {
         for (uint32_t i = 0; i < fenceCount; i++) {
            if (anv_wait_for_fences(device, 1, &pFences[i], true, 0) == VK_SUCCESS)
               return VK_SUCCESS;
         }
      } while (anv_gettime_ns() < abs_timeout);
      result = VK_TIMEOUT;
   }
   return result;
}

static bool anv_all_fences_syncobj(uint32_t fenceCount, const VkFence *pFences)
{
   for (uint32_t i = 0; i < fenceCount; ++i) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      struct anv_fence_impl *impl =
         fence->temporary.type != ANV_FENCE_TYPE_NONE ?
         &fence->temporary : &fence->permanent;
      if (impl->type != ANV_FENCE_TYPE_SYNCOBJ)
         return false;
   }
   return true;
}

static bool anv_all_fences_bo(uint32_t fenceCount, const VkFence *pFences)
{
   for (uint32_t i = 0; i < fenceCount; ++i) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      struct anv_fence_impl *impl =
         fence->temporary.type != ANV_FENCE_TYPE_NONE ?
         &fence->temporary : &fence->permanent;
      if (impl->type != ANV_FENCE_TYPE_BO &&
          impl->type != ANV_FENCE_TYPE_WSI_BO)
         return false;
   }
   return true;
}

VkResult anv_WaitForFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences,
    VkBool32                                    waitAll,
    uint64_t                                    timeout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   if (device->no_hw)
      return VK_SUCCESS;

   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   uint64_t abs_timeout = anv_get_absolute_timeout(timeout);
   if (anv_all_fences_syncobj(fenceCount, pFences)) {
      return anv_wait_for_syncobj_fences(device, fenceCount, pFences,
                                         waitAll, abs_timeout);
   } else if (anv_all_fences_bo(fenceCount, pFences)) {
      return anv_wait_for_bo_fences(device, fenceCount, pFences,
                                    waitAll, abs_timeout);
   } else {
      return anv_wait_for_fences(device, fenceCount, pFences,
                                 waitAll, abs_timeout);
   }
}

void anv_GetPhysicalDeviceExternalFenceProperties(
    VkPhysicalDevice                            physicalDevice,
    const VkPhysicalDeviceExternalFenceInfo*    pExternalFenceInfo,
    VkExternalFenceProperties*                  pExternalFenceProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   switch (pExternalFenceInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
      if (device->has_syncobj_wait) {
         pExternalFenceProperties->exportFromImportedHandleTypes =
            VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT |
            VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalFenceProperties->compatibleHandleTypes =
            VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT |
            VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalFenceProperties->externalFenceFeatures =
            VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT;
         return;
      }
      break;

   default:
      break;
   }

   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

VkResult anv_ImportFenceFdKHR(
    VkDevice                                    _device,
    const VkImportFenceFdInfoKHR*               pImportFenceFdInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, pImportFenceFdInfo->fence);
   int fd = pImportFenceFdInfo->fd;

   assert(pImportFenceFdInfo->sType ==
          VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR);

   struct anv_fence_impl new_impl = {
      .type = ANV_FENCE_TYPE_NONE,
   };

   switch (pImportFenceFdInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
      new_impl.type = ANV_FENCE_TYPE_SYNCOBJ;

      new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
      if (!new_impl.syncobj)
         return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);

      break;

   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
      /* Sync files are a bit tricky. Because we want to continue using the
       * syncobj implementation of WaitForFences, we don't use the sync file
       * directly but instead import it into a syncobj.
       */
      new_impl.type = ANV_FENCE_TYPE_SYNCOBJ;

      new_impl.syncobj = anv_gem_syncobj_create(device, 0);
      if (!new_impl.syncobj)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      if (anv_gem_syncobj_import_sync_file(device, new_impl.syncobj, fd)) {
         anv_gem_syncobj_destroy(device, new_impl.syncobj);
         return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
                          "syncobj sync file import failed: %m");
      }
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Importing a fence payload from a file descriptor transfers
    *    ownership of the file descriptor from the application to the
    *    Vulkan implementation. The application must not perform any
    *    operations on the file descriptor after a successful import."
    *
    * If the import fails, we leave the file descriptor open.
    */
   close(fd);

   if (pImportFenceFdInfo->flags & VK_FENCE_IMPORT_TEMPORARY_BIT) {
      anv_fence_impl_cleanup(device, &fence->temporary);
      fence->temporary = new_impl;
   } else {
      anv_fence_impl_cleanup(device, &fence->permanent);
      fence->permanent = new_impl;
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceFdKHR(
    VkDevice                                    _device,
    const VkFenceGetFdInfoKHR*                  pGetFdInfo,
    int*                                        pFd)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, pGetFdInfo->fence);

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR);

   struct anv_fence_impl *impl =
      fence->temporary.type != ANV_FENCE_TYPE_NONE ?
      &fence->temporary : &fence->permanent;

   assert(impl->type == ANV_FENCE_TYPE_SYNCOBJ);
   switch (pGetFdInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT: {
      int fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = fd;
      break;
   }

   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT: {
      int fd = anv_gem_syncobj_export_sync_file(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = fd;
      break;
   }

   default:
      unreachable("Invalid fence export handle type");
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Export operations have the same transference as the specified handle
    *    type’s import operations. [...] If the fence was using a
    *    temporarily imported payload, the fence’s prior permanent payload
    *    will be restored."
    */
   if (impl == &fence->temporary)
      anv_fence_impl_cleanup(device, impl);

   return VK_SUCCESS;
}

// Queue semaphore functions

static VkSemaphoreTypeKHR
get_semaphore_type(const void *pNext, uint64_t *initial_value)
{
   const VkSemaphoreTypeCreateInfoKHR *type_info =
      vk_find_struct_const(pNext, SEMAPHORE_TYPE_CREATE_INFO_KHR);

   if (!type_info)
      return VK_SEMAPHORE_TYPE_BINARY_KHR;

   if (initial_value)
      *initial_value = type_info->initialValue;
   return type_info->semaphoreType;
}
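
/* Sketch of the application-side structure chain this helper parses
 * (hypothetical values, standard VK_KHR_timeline_semaphore usage):
 *
 *    VkSemaphoreTypeCreateInfoKHR type_info = {
 *       .sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR,
 *       .semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE_KHR,
 *       .initialValue = 1,
 *    };
 *    VkSemaphoreCreateInfo create_info = {
 *       .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
 *       .pNext = &type_info,
 *    };
 *    vkCreateSemaphore(device, &create_info, NULL, &semaphore);
 */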
1746
1747 static VkResult
1748 binary_semaphore_create(struct anv_device *device,
1749 struct anv_semaphore_impl *impl,
1750 bool exportable)
1751 {
1752 if (device->physical->has_syncobj) {
1753 impl->type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
1754 impl->syncobj = anv_gem_syncobj_create(device, 0);
1755 if (!impl->syncobj)
1756 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1757 return VK_SUCCESS;
1758 } else {
1759 impl->type = ANV_SEMAPHORE_TYPE_BO;
1760 VkResult result =
1761 anv_device_alloc_bo(device, 4096,
1762 ANV_BO_ALLOC_EXTERNAL |
1763 ANV_BO_ALLOC_IMPLICIT_SYNC,
1764 0 /* explicit_address */,
1765 &impl->bo);
1766 /* If we're going to use this as a fence, we need to *not* have the
1767 * EXEC_OBJECT_ASYNC bit set.
1768 */
1769 assert(!(impl->bo->flags & EXEC_OBJECT_ASYNC));
1770 return result;
1771 }
1772 }
1773
1774 static VkResult
1775 timeline_semaphore_create(struct anv_device *device,
1776 struct anv_semaphore_impl *impl,
1777 uint64_t initial_value)
1778 {
1779 impl->type = ANV_SEMAPHORE_TYPE_TIMELINE;
1780 anv_timeline_init(device, &impl->timeline, initial_value);
1781 return VK_SUCCESS;
1782 }

VkResult anv_CreateSemaphore(
    VkDevice                                    _device,
    const VkSemaphoreCreateInfo*                pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkSemaphore*                                pSemaphore)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_semaphore *semaphore;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO);

   uint64_t timeline_value = 0;
   VkSemaphoreTypeKHR sem_type = get_semaphore_type(pCreateInfo->pNext, &timeline_value);

   semaphore = vk_alloc(&device->vk.alloc, sizeof(*semaphore), 8,
                        VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (semaphore == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &semaphore->base, VK_OBJECT_TYPE_SEMAPHORE);

   p_atomic_set(&semaphore->refcount, 1);

   const VkExportSemaphoreCreateInfo *export =
      vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO);
   VkExternalSemaphoreHandleTypeFlags handleTypes =
      export ? export->handleTypes : 0;
   VkResult result;

   if (handleTypes == 0) {
      if (sem_type == VK_SEMAPHORE_TYPE_BINARY_KHR)
         result = binary_semaphore_create(device, &semaphore->permanent, false);
      else
         result = timeline_semaphore_create(device, &semaphore->permanent, timeline_value);
      if (result != VK_SUCCESS) {
         vk_free2(&device->vk.alloc, pAllocator, semaphore);
         return result;
      }
   } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) {
      assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT);
      assert(sem_type == VK_SEMAPHORE_TYPE_BINARY_KHR);
      result = binary_semaphore_create(device, &semaphore->permanent, true);
      if (result != VK_SUCCESS) {
         vk_free2(&device->vk.alloc, pAllocator, semaphore);
         return result;
      }
   } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
      assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT);
      assert(sem_type == VK_SEMAPHORE_TYPE_BINARY_KHR);
      if (device->physical->has_syncobj) {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
         semaphore->permanent.syncobj = anv_gem_syncobj_create(device, 0);
         if (!semaphore->permanent.syncobj) {
            vk_free2(&device->vk.alloc, pAllocator, semaphore);
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         }
      } else {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_SYNC_FILE;
         semaphore->permanent.fd = -1;
      }
   } else {
      assert(!"Unknown handle type");
      vk_free2(&device->vk.alloc, pAllocator, semaphore);
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   semaphore->temporary.type = ANV_SEMAPHORE_TYPE_NONE;

   *pSemaphore = anv_semaphore_to_handle(semaphore);

   return VK_SUCCESS;
}
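
/* Illustrative sketch (editor's addition, hypothetical application code):
 * requesting an exportable binary semaphore drives the OPAQUE_FD branch of
 * anv_CreateSemaphore() above:
 *
 *    VkExportSemaphoreCreateInfo export_info = {
 *       .sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO,
 *       .handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
 *    };
 *    VkSemaphoreCreateInfo create_info = {
 *       .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
 *       .pNext = &export_info,
 *    };
 *    VkSemaphore semaphore;
 *    VkResult result = vkCreateSemaphore(device, &create_info, NULL, &semaphore);
 */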

static void
anv_semaphore_impl_cleanup(struct anv_device *device,
                           struct anv_semaphore_impl *impl)
{
   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_NONE:
   case ANV_SEMAPHORE_TYPE_DUMMY:
      /* Dummy.  Nothing to do */
      break;

   case ANV_SEMAPHORE_TYPE_BO:
   case ANV_SEMAPHORE_TYPE_WSI_BO:
      anv_device_release_bo(device, impl->bo);
      break;

   case ANV_SEMAPHORE_TYPE_SYNC_FILE:
      if (impl->fd >= 0)
         close(impl->fd);
      break;

   case ANV_SEMAPHORE_TYPE_TIMELINE:
      anv_timeline_finish(device, &impl->timeline);
      break;

   case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
      anv_gem_syncobj_destroy(device, impl->syncobj);
      break;

   default:
      unreachable("Invalid semaphore type");
   }

   impl->type = ANV_SEMAPHORE_TYPE_NONE;
}

void
anv_semaphore_reset_temporary(struct anv_device *device,
                              struct anv_semaphore *semaphore)
{
   if (semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE)
      return;

   anv_semaphore_impl_cleanup(device, &semaphore->temporary);
}

static struct anv_semaphore *
anv_semaphore_ref(struct anv_semaphore *semaphore)
{
   assert(semaphore->refcount);
   p_atomic_inc(&semaphore->refcount);
   return semaphore;
}

static void
anv_semaphore_unref(struct anv_device *device, struct anv_semaphore *semaphore)
{
   if (!p_atomic_dec_zero(&semaphore->refcount))
      return;

   anv_semaphore_impl_cleanup(device, &semaphore->temporary);
   anv_semaphore_impl_cleanup(device, &semaphore->permanent);

   vk_object_base_finish(&semaphore->base);
   vk_free(&device->vk.alloc, semaphore);
}

void anv_DestroySemaphore(
    VkDevice                                    _device,
    VkSemaphore                                 _semaphore,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, _semaphore);

   if (semaphore == NULL)
      return;

   anv_semaphore_unref(device, semaphore);
}

void anv_GetPhysicalDeviceExternalSemaphoreProperties(
    VkPhysicalDevice                            physicalDevice,
    const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
    VkExternalSemaphoreProperties*              pExternalSemaphoreProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   VkSemaphoreTypeKHR sem_type =
      get_semaphore_type(pExternalSemaphoreInfo->pNext, NULL);

   switch (pExternalSemaphoreInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
      /* Timeline semaphores are not exportable. */
      if (sem_type == VK_SEMAPHORE_TYPE_TIMELINE_KHR)
         break;
      pExternalSemaphoreProperties->exportFromImportedHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
      pExternalSemaphoreProperties->compatibleHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
      pExternalSemaphoreProperties->externalSemaphoreFeatures =
         VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
         VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
      return;

   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
      if (sem_type == VK_SEMAPHORE_TYPE_TIMELINE_KHR)
         break;
      if (!device->has_exec_fence)
         break;
      pExternalSemaphoreProperties->exportFromImportedHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
      pExternalSemaphoreProperties->compatibleHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
      pExternalSemaphoreProperties->externalSemaphoreFeatures =
         VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
         VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
      return;

   default:
      break;
   }

   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}
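
/* Illustrative sketch (editor's addition, hypothetical application code):
 * an application is expected to query these properties before relying on a
 * particular external handle type:
 *
 *    VkPhysicalDeviceExternalSemaphoreInfo info = {
 *       .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO,
 *       .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
 *    };
 *    VkExternalSemaphoreProperties props = {
 *       .sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES,
 *    };
 *    vkGetPhysicalDeviceExternalSemaphoreProperties(physical_device,
 *                                                   &info, &props);
 *    bool exportable = props.externalSemaphoreFeatures &
 *                      VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT;
 */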

VkResult anv_ImportSemaphoreFdKHR(
    VkDevice                                    _device,
    const VkImportSemaphoreFdInfoKHR*           pImportSemaphoreFdInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pImportSemaphoreFdInfo->semaphore);
   int fd = pImportSemaphoreFdInfo->fd;

   struct anv_semaphore_impl new_impl = {
      .type = ANV_SEMAPHORE_TYPE_NONE,
   };

   switch (pImportSemaphoreFdInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
      if (device->physical->has_syncobj) {
         new_impl.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;

         new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
         if (!new_impl.syncobj)
            return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
      } else {
         new_impl.type = ANV_SEMAPHORE_TYPE_BO;

         VkResult result = anv_device_import_bo(device, fd,
                                                ANV_BO_ALLOC_EXTERNAL |
                                                ANV_BO_ALLOC_IMPLICIT_SYNC,
                                                0 /* client_address */,
                                                &new_impl.bo);
         if (result != VK_SUCCESS)
            return result;

         if (new_impl.bo->size < 4096) {
            anv_device_release_bo(device, new_impl.bo);
            return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
         }

         /* If we're going to use this as a fence, we need to *not* have the
          * EXEC_OBJECT_ASYNC bit set.
          */
         assert(!(new_impl.bo->flags & EXEC_OBJECT_ASYNC));
      }

      /* From the Vulkan spec:
       *
       *    "Importing semaphore state from a file descriptor transfers
       *    ownership of the file descriptor from the application to the
       *    Vulkan implementation. The application must not perform any
       *    operations on the file descriptor after a successful import."
       *
       * If the import fails, we leave the file descriptor open.
       */
      close(fd);
      break;

   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
      if (device->physical->has_syncobj) {
         new_impl = (struct anv_semaphore_impl) {
            .type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ,
            .syncobj = anv_gem_syncobj_create(device, 0),
         };
         if (!new_impl.syncobj)
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         if (anv_gem_syncobj_import_sync_file(device, new_impl.syncobj, fd)) {
            anv_gem_syncobj_destroy(device, new_impl.syncobj);
            return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
                             "syncobj sync file import failed: %m");
         }
         /* Ownership of the FD is transferred to Anv.  Since we don't need
          * it anymore because the associated fence has been put into a
          * syncobj, we must close the FD.
          */
         close(fd);
      } else {
         new_impl = (struct anv_semaphore_impl) {
            .type = ANV_SEMAPHORE_TYPE_SYNC_FILE,
            .fd = fd,
         };
      }
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
      anv_semaphore_impl_cleanup(device, &semaphore->temporary);
      semaphore->temporary = new_impl;
   } else {
      anv_semaphore_impl_cleanup(device, &semaphore->permanent);
      semaphore->permanent = new_impl;
   }

   return VK_SUCCESS;
}
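
/* Illustrative sketch (editor's addition, hypothetical application code):
 * a sync-file import must use temporary transference per the spec, so it
 * only overrides the semaphore payload until the next wait.  Assuming a
 * "sync_fd" obtained from another API:
 *
 *    VkImportSemaphoreFdInfoKHR import_info = {
 *       .sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR,
 *       .semaphore = semaphore,
 *       .flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT,
 *       .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
 *       .fd = sync_fd,
 *    };
 *    VkResult result = vkImportSemaphoreFdKHR(device, &import_info);
 *    // On success, ownership of sync_fd has moved to the implementation.
 */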

VkResult anv_GetSemaphoreFdKHR(
    VkDevice                                    _device,
    const VkSemaphoreGetFdInfoKHR*              pGetFdInfo,
    int*                                        pFd)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pGetFdInfo->semaphore);
   VkResult result;
   int fd;

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR);

   struct anv_semaphore_impl *impl =
      semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
      &semaphore->temporary : &semaphore->permanent;

   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_BO:
      result = anv_device_export_bo(device, impl->bo, pFd);
      if (result != VK_SUCCESS)
         return result;
      break;

   case ANV_SEMAPHORE_TYPE_SYNC_FILE: {
      /* There's a potential race here with vkQueueSubmit if you are trying
       * to export a semaphore FD while the queue submit is still happening.
       * This can happen if we see all dependencies get resolved via timeline
       * semaphore waits completing before the execbuf completes and we
       * process the resulting out fence.  To work around this, take a lock
       * around grabbing the FD.
       */
      pthread_mutex_lock(&device->mutex);

      /* From the Vulkan 1.0.53 spec:
       *
       *    "...exporting a semaphore payload to a handle with copy
       *    transference has the same side effects on the source
       *    semaphore’s payload as executing a semaphore wait operation."
       *
       * In other words, it may still be a SYNC_FD semaphore, but it's now
       * considered to have been waited on and no longer has a sync file
       * attached.
       */
      int fd = impl->fd;
      impl->fd = -1;

      pthread_mutex_unlock(&device->mutex);

      /* There are two reasons why this could happen:
       *
       * 1) The user is trying to export without submitting something that
       *    signals the semaphore.  If this is the case, it's their bug so
       *    what we return here doesn't matter.
       *
       * 2) The kernel didn't give us a file descriptor.  The most likely
       *    reason for this is running out of file descriptors.
       */
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = fd;
      return VK_SUCCESS;
   }

   case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
      if (pGetFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
         fd = anv_gem_syncobj_export_sync_file(device, impl->syncobj);
      } else {
         assert(pGetFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT);
         fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
      }
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
      *pFd = fd;
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Export operations have the same transference as the specified handle
    *    type’s import operations. [...] If the semaphore was using a
    *    temporarily imported payload, the semaphore’s prior permanent payload
    *    will be restored."
    */
   if (impl == &semaphore->temporary)
      anv_semaphore_impl_cleanup(device, impl);

   return VK_SUCCESS;
}
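
/* Illustrative sketch (editor's addition, hypothetical application code):
 * exporting a SYNC_FD has copy transference, so per the spec quote above the
 * payload is consumed exactly as if the semaphore had been waited on:
 *
 *    VkSemaphoreGetFdInfoKHR get_fd_info = {
 *       .sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR,
 *       .semaphore = semaphore,
 *       .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
 *    };
 *    int fd = -1;
 *    VkResult result = vkGetSemaphoreFdKHR(device, &get_fd_info, &fd);
 *    // The semaphore must have a pending signal operation at this point.
 */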

VkResult anv_GetSemaphoreCounterValue(
    VkDevice                                    _device,
    VkSemaphore                                 _semaphore,
    uint64_t*                                   pValue)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, _semaphore);

   struct anv_semaphore_impl *impl =
      semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
      &semaphore->temporary : &semaphore->permanent;

   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_TIMELINE: {
      pthread_mutex_lock(&device->mutex);
      *pValue = impl->timeline.highest_past;
      pthread_mutex_unlock(&device->mutex);
      return VK_SUCCESS;
   }

   default:
      unreachable("Invalid semaphore type");
   }
}
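
/* Illustrative sketch (editor's addition, hypothetical application code):
 * polling a timeline semaphore from the host, assuming an "expected" target
 * value chosen by the application:
 *
 *    uint64_t value = 0;
 *    VkResult result = vkGetSemaphoreCounterValueKHR(device, semaphore,
 *                                                    &value);
 *    if (result == VK_SUCCESS && value >= expected) {
 *       // The corresponding GPU work has completed.
 *    }
 */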

static VkResult
anv_timeline_wait_locked(struct anv_device *device,
                         struct anv_timeline *timeline,
                         uint64_t serial, uint64_t abs_timeout_ns)
{
   /* Wait on the queue_submit condition variable until the timeline has a
    * time point pending that's at least as high as serial.
    */
   while (timeline->highest_pending < serial) {
      struct timespec abstime = {
         .tv_sec = abs_timeout_ns / NSEC_PER_SEC,
         .tv_nsec = abs_timeout_ns % NSEC_PER_SEC,
      };

      ASSERTED int ret = pthread_cond_timedwait(&device->queue_submit,
                                                &device->mutex, &abstime);
      assert(ret != EINVAL);
      if (anv_gettime_ns() >= abs_timeout_ns &&
          timeline->highest_pending < serial)
         return VK_TIMEOUT;
   }

   while (1) {
      VkResult result = anv_timeline_gc_locked(device, timeline);
      if (result != VK_SUCCESS)
         return result;

      if (timeline->highest_past >= serial)
         return VK_SUCCESS;

      /* If we got here, our earliest time point has a busy BO */
      struct anv_timeline_point *point =
         list_first_entry(&timeline->points,
                          struct anv_timeline_point, link);

      /* Drop the lock while we wait. */
      point->waiting++;
      pthread_mutex_unlock(&device->mutex);

      result = anv_device_wait(device, point->bo,
                               anv_get_relative_timeout(abs_timeout_ns));

      /* Pick the mutex back up */
      pthread_mutex_lock(&device->mutex);
      point->waiting--;

      /* This covers both VK_TIMEOUT and VK_ERROR_DEVICE_LOST */
      if (result != VK_SUCCESS)
         return result;
   }
}

static VkResult
anv_timelines_wait(struct anv_device *device,
                   struct anv_timeline **timelines,
                   const uint64_t *serials,
                   uint32_t n_timelines,
                   bool wait_all,
                   uint64_t abs_timeout_ns)
{
   if (!wait_all && n_timelines > 1) {
      pthread_mutex_lock(&device->mutex);

      while (1) {
         VkResult result;
         for (uint32_t i = 0; i < n_timelines; i++) {
            result =
               anv_timeline_wait_locked(device, timelines[i], serials[i], 0);
            if (result != VK_TIMEOUT)
               break;
         }

         if (result != VK_TIMEOUT ||
             anv_gettime_ns() >= abs_timeout_ns) {
            pthread_mutex_unlock(&device->mutex);
            return result;
         }

         /* If none of them are ready do a short wait so we don't completely
          * spin while holding the lock.  Wait for a tenth of the remaining
          * time, capped at 10us; the 10us is completely arbitrary.
          */
         uint64_t abs_short_wait_ns =
            anv_get_absolute_timeout(
               MIN2((abs_timeout_ns - anv_gettime_ns()) / 10, 10 * 1000));
         struct timespec abstime = {
            .tv_sec = abs_short_wait_ns / NSEC_PER_SEC,
            .tv_nsec = abs_short_wait_ns % NSEC_PER_SEC,
         };
         ASSERTED int ret;
         ret = pthread_cond_timedwait(&device->queue_submit,
                                      &device->mutex, &abstime);
         assert(ret != EINVAL);
      }
   } else {
      VkResult result = VK_SUCCESS;
      pthread_mutex_lock(&device->mutex);
      for (uint32_t i = 0; i < n_timelines; i++) {
         result =
            anv_timeline_wait_locked(device, timelines[i],
                                     serials[i], abs_timeout_ns);
         if (result != VK_SUCCESS)
            break;
      }
      pthread_mutex_unlock(&device->mutex);
      return result;
   }
}

VkResult anv_WaitSemaphores(
    VkDevice                                    _device,
    const VkSemaphoreWaitInfoKHR*               pWaitInfo,
    uint64_t                                    timeout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   if (device->no_hw)
      return VK_SUCCESS;

   struct anv_timeline **timelines =
      vk_alloc(&device->vk.alloc,
               pWaitInfo->semaphoreCount * sizeof(*timelines),
               8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!timelines)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   uint64_t *values = vk_alloc(&device->vk.alloc,
                               pWaitInfo->semaphoreCount * sizeof(*values),
                               8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!values) {
      vk_free(&device->vk.alloc, timelines);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   uint32_t handle_count = 0;
   for (uint32_t i = 0; i < pWaitInfo->semaphoreCount; i++) {
      ANV_FROM_HANDLE(anv_semaphore, semaphore, pWaitInfo->pSemaphores[i]);
      struct anv_semaphore_impl *impl =
         semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
         &semaphore->temporary : &semaphore->permanent;

      assert(impl->type == ANV_SEMAPHORE_TYPE_TIMELINE);

      /* A wait on the value zero is always trivially satisfied; with
       * WAIT_ANY that satisfies the entire wait.
       */
      if (pWaitInfo->pValues[i] == 0) {
         if (pWaitInfo->flags & VK_SEMAPHORE_WAIT_ANY_BIT_KHR) {
            vk_free(&device->vk.alloc, timelines);
            vk_free(&device->vk.alloc, values);
            return VK_SUCCESS;
         }
         continue;
      }

      timelines[handle_count] = &impl->timeline;
      values[handle_count] = pWaitInfo->pValues[i];
      handle_count++;
   }

   VkResult result = VK_SUCCESS;
   if (handle_count > 0) {
      result = anv_timelines_wait(device, timelines, values, handle_count,
                                  !(pWaitInfo->flags & VK_SEMAPHORE_WAIT_ANY_BIT_KHR),
                                  anv_get_absolute_timeout(timeout));
   }

   vk_free(&device->vk.alloc, timelines);
   vk_free(&device->vk.alloc, values);

   return result;
}
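
/* Illustrative sketch (editor's addition, hypothetical application code):
 * waiting until either of two timeline semaphores reaches its target value,
 * assuming "sem_a" and "sem_b" handles created elsewhere:
 *
 *    VkSemaphore semaphores[2] = { sem_a, sem_b };
 *    uint64_t values[2] = { 10, 42 };
 *    VkSemaphoreWaitInfoKHR wait_info = {
 *       .sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO_KHR,
 *       .flags = VK_SEMAPHORE_WAIT_ANY_BIT_KHR,
 *       .semaphoreCount = 2,
 *       .pSemaphores = semaphores,
 *       .pValues = values,
 *    };
 *    VkResult result = vkWaitSemaphoresKHR(device, &wait_info, UINT64_MAX);
 */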

VkResult anv_SignalSemaphore(
    VkDevice                                    _device,
    const VkSemaphoreSignalInfoKHR*             pSignalInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pSignalInfo->semaphore);

   struct anv_semaphore_impl *impl =
      semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
      &semaphore->temporary : &semaphore->permanent;

   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_TIMELINE: {
      pthread_mutex_lock(&device->mutex);

      VkResult result = anv_timeline_gc_locked(device, &impl->timeline);

      assert(pSignalInfo->value > impl->timeline.highest_pending);

      impl->timeline.highest_pending = impl->timeline.highest_past = pSignalInfo->value;

      if (result == VK_SUCCESS)
         result = anv_device_submit_deferred_locked(device);

      pthread_cond_broadcast(&device->queue_submit);
      pthread_mutex_unlock(&device->mutex);
      return result;
   }

   default:
      unreachable("Invalid semaphore type");
   }
}
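
/* Illustrative sketch (editor's addition, hypothetical application code):
 * signaling a timeline semaphore from the host; the value must be strictly
 * greater than the semaphore's current counter value:
 *
 *    VkSemaphoreSignalInfoKHR signal_info = {
 *       .sType = VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO_KHR,
 *       .semaphore = semaphore,
 *       .value = 43,
 *    };
 *    VkResult result = vkSignalSemaphoreKHR(device, &signal_info);
 */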