Added a few more stubs so that control reaches DestroyDevice().
/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_fence.c
 *
 * Fences for driver and IPC serialisation, scheduling and synchronisation.
 */

#include "drm-uapi/sync_file.h"
#include "util/u_debug.h"
#include "util/u_inlines.h"
#include "intel/common/gen_gem.h"

#include "iris_batch.h"
#include "iris_bufmgr.h"
#include "iris_context.h"
#include "iris_fence.h"
#include "iris_screen.h"

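/* Thin wrapper around DRM_IOCTL_SYNCOBJ_CREATE; returns the new syncobj
 * handle, or 0 if creation failed.
 */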
static uint32_t
gem_syncobj_create(int fd, uint32_t flags)
{
   struct drm_syncobj_create args = {
      .flags = flags,
   };

   gen_ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);

   return args.handle;
}

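/* Thin wrapper around DRM_IOCTL_SYNCOBJ_DESTROY, releasing the kernel
 * sync object behind \p handle.
 */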
static void
gem_syncobj_destroy(int fd, uint32_t handle)
{
   struct drm_syncobj_destroy args = {
      .handle = handle,
   };

   gen_ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
}

/**
 * Make a new sync-point.
 */
struct iris_syncobj *
iris_create_syncobj(struct iris_screen *screen)
{
   struct iris_syncobj *syncobj = malloc(sizeof(*syncobj));

   if (!syncobj)
      return NULL;

   syncobj->handle = gem_syncobj_create(screen->fd, 0);
   assert(syncobj->handle);

   pipe_reference_init(&syncobj->ref, 1);

   return syncobj;
}

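/* Free a sync-point and its kernel syncobj.  Callers normally go through
 * iris_syncobj_reference() (see iris_fence.h) rather than calling this
 * directly.
 */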
void
iris_syncobj_destroy(struct iris_screen *screen, struct iris_syncobj *syncobj)
{
   gem_syncobj_destroy(screen->fd, syncobj->handle);
   free(syncobj);
}

/**
 * Add a sync-point to the batch, with the given flags.
 *
 * \p flags   One of I915_EXEC_FENCE_WAIT or I915_EXEC_FENCE_SIGNAL.
 */
void
iris_batch_add_syncobj(struct iris_batch *batch,
                       struct iris_syncobj *syncobj,
                       unsigned flags)
{
   struct drm_i915_gem_exec_fence *fence =
      util_dynarray_grow(&batch->exec_fences, struct drm_i915_gem_exec_fence, 1);

   *fence = (struct drm_i915_gem_exec_fence) {
      .handle = syncobj->handle,
      .flags = flags,
   };

   struct iris_syncobj **store =
      util_dynarray_grow(&batch->syncobjs, struct iris_syncobj *, 1);

   *store = NULL;
   iris_syncobj_reference(batch->screen, store, syncobj);
}

/**
 * Walk through a batch's dependencies (any I915_EXEC_FENCE_WAIT syncobjs)
 * and unreference any which have already passed.
 *
 * This matters mostly for seldom-used batches (such as the compute batch),
 * which can otherwise accumulate references to stale render batches that
 * are no longer of interest; clearing them lets us free those up.
 */
static void
clear_stale_syncobjs(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;

   int n = util_dynarray_num_elements(&batch->syncobjs, struct iris_syncobj *);

   assert(n == util_dynarray_num_elements(&batch->exec_fences,
                                          struct drm_i915_gem_exec_fence));

   /* Skip the first syncobj, as it's the signalling one. */
   for (int i = n - 1; i > 1; i--) {
      struct iris_syncobj **syncobj =
         util_dynarray_element(&batch->syncobjs, struct iris_syncobj *, i);
      struct drm_i915_gem_exec_fence *fence =
         util_dynarray_element(&batch->exec_fences,
                               struct drm_i915_gem_exec_fence, i);
      assert(fence->flags & I915_EXEC_FENCE_WAIT);

      if (iris_wait_syncobj(&screen->base, *syncobj, 0))
         continue;

      /* This sync object has already passed, there's no need to continue
       * marking it as a dependency; we can stop holding on to the reference.
       */
      iris_syncobj_reference(screen, syncobj, NULL);

      /* Remove it from the lists; move the last element here. */
      struct iris_syncobj **nth_syncobj =
         util_dynarray_pop_ptr(&batch->syncobjs, struct iris_syncobj *);
      struct drm_i915_gem_exec_fence *nth_fence =
         util_dynarray_pop_ptr(&batch->exec_fences,
                               struct drm_i915_gem_exec_fence);

      if (syncobj != nth_syncobj) {
         *syncobj = *nth_syncobj;
         memcpy(fence, nth_fence, sizeof(*fence));
      }
   }
}

/* ------------------------------------------------------------------- */

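/* A Gallium fence, made up of one fine-grained fence per batch.  When the
 * fence comes from a PIPE_FLUSH_DEFERRED flush, unflushed_ctx records the
 * creating context until its batches are actually submitted.
 */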
struct pipe_fence_handle {
   struct pipe_reference ref;

   struct pipe_context *unflushed_ctx;

   struct iris_fine_fence *fine[IRIS_BATCH_COUNT];
};

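/* Drop the per-batch fine fence references and free the fence.  Reached
 * from iris_fence_reference() when the refcount hits zero.
 */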
static void
iris_fence_destroy(struct pipe_screen *p_screen,
                   struct pipe_fence_handle *fence)
{
   struct iris_screen *screen = (struct iris_screen *)p_screen;

   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++)
      iris_fine_fence_reference(screen, &fence->fine[i], NULL);

   free(fence);
}

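/* pipe_screen::fence_reference() implementation: standard refcounted
 * assignment, destroying the old fence once its last reference goes away.
 */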
static void
iris_fence_reference(struct pipe_screen *p_screen,
                     struct pipe_fence_handle **dst,
                     struct pipe_fence_handle *src)
{
   if (pipe_reference(*dst ? &(*dst)->ref : NULL,
                      src ? &src->ref : NULL))
      iris_fence_destroy(p_screen, *dst);

   *dst = src;
}

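/* Wait up to \p timeout_nsec for \p syncobj to signal.  Note the return
 * value is the raw ioctl result cast to bool: false once the syncobj has
 * signalled, true (non-zero) on timeout or error.  clear_stale_syncobjs()
 * relies on this.
 */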
bool
iris_wait_syncobj(struct pipe_screen *p_screen,
                  struct iris_syncobj *syncobj,
                  int64_t timeout_nsec)
{
   if (!syncobj)
      return false;

   struct iris_screen *screen = (struct iris_screen *)p_screen;
   struct drm_syncobj_wait args = {
      .handles = (uintptr_t)&syncobj->handle,
      .count_handles = 1,
      .timeout_nsec = timeout_nsec,
   };
   return gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
}

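/* ANSI escape sequences used to colour the frame markers printed when
 * DEBUG_SUBMIT is enabled below.
 */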
#define CSI "\e["
#define BLUE_HEADER  CSI "0;97;44m"
#define NORMAL       CSI "0m"

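/* pipe_context::flush() implementation.  Submits all batches (unless the
 * flush is deferred) and, if requested, returns a fence bundling one fine
 * fence per batch.  State trackers reach it along the lines of
 * (illustrative call only):
 *
 *    ctx->flush(ctx, &fence, PIPE_FLUSH_DEFERRED);
 */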
static void
iris_fence_flush(struct pipe_context *ctx,
                 struct pipe_fence_handle **out_fence,
                 unsigned flags)
{
   struct iris_screen *screen = (void *) ctx->screen;
   struct iris_context *ice = (struct iris_context *)ctx;

   /* We require DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (kernel 5.2+) for
    * deferred flushes.  Just ignore the request to defer on older kernels.
    */
   if (!(screen->kernel_features & KERNEL_HAS_WAIT_FOR_SUBMIT))
      flags &= ~PIPE_FLUSH_DEFERRED;

   const bool deferred = flags & PIPE_FLUSH_DEFERRED;

   if (flags & PIPE_FLUSH_END_OF_FRAME) {
      ice->frame++;

      if (INTEL_DEBUG & DEBUG_SUBMIT) {
         fprintf(stderr, "%s ::: FRAME %-10u (ctx %p)%-35c%s\n",
                 (INTEL_DEBUG & DEBUG_COLOR) ? BLUE_HEADER : "",
                 ice->frame, ctx, ' ',
                 (INTEL_DEBUG & DEBUG_COLOR) ? NORMAL : "");
      }
   }

   if (!deferred) {
      for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++)
         iris_batch_flush(&ice->batches[i]);
   }

   if (!out_fence)
      return;

   struct pipe_fence_handle *fence = calloc(1, sizeof(*fence));
   if (!fence)
      return;

   pipe_reference_init(&fence->ref, 1);

   if (deferred)
      fence->unflushed_ctx = ctx;

   for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
      struct iris_batch *batch = &ice->batches[b];

      if (deferred && iris_batch_bytes_used(batch) > 0) {
         struct iris_fine_fence *fine =
            iris_fine_fence_new(batch, IRIS_FENCE_BOTTOM_OF_PIPE);
         iris_fine_fence_reference(screen, &fence->fine[b], fine);
         iris_fine_fence_reference(screen, &fine, NULL);
      } else {
         /* This batch has no commands queued up (perhaps we just flushed,
          * or all the commands are on the other batch).  Wait for the last
          * syncobj on this engine - unless it's already finished by now.
          */
         if (iris_fine_fence_signaled(batch->last_fence))
            continue;

         iris_fine_fence_reference(screen, &fence->fine[b], batch->last_fence);
      }
   }

   iris_fence_reference(ctx->screen, out_fence, NULL);
   *out_fence = fence;
}

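/* pipe_context::fence_server_sync() implementation: make all GPU work
 * submitted on this context after this call wait for \p fence.
 */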
static void
iris_fence_await(struct pipe_context *ctx,
                 struct pipe_fence_handle *fence)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   /* Unflushed fences from the same context are no-ops. */
   if (ctx && ctx == fence->unflushed_ctx)
      return;

   /* XXX: We can't safely flush the other context, because it might be
    *      bound to another thread, and poking at its internals wouldn't
    *      be safe.  In the future we should use MI_SEMAPHORE_WAIT and
    *      block until the other job has been submitted, relying on
    *      kernel timeslicing to preempt us until the other job is
    *      actually flushed and the seqno finally passes.
    */
   if (fence->unflushed_ctx) {
      pipe_debug_message(&ice->dbg, CONFORMANCE, "%s",
                         "glWaitSync on unflushed fence from another context "
                         "is unlikely to work without kernel 5.8+\n");
   }

   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
      struct iris_fine_fence *fine = fence->fine[i];

      if (iris_fine_fence_signaled(fine))
         continue;

      for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
         struct iris_batch *batch = &ice->batches[b];

         /* We're going to make any future work in this batch wait for our
          * fence to have gone by.  But any currently queued work doesn't
          * need to wait.  Flush the batch now, so it can happen sooner.
          */
         iris_batch_flush(batch);

         /* Before adding a new reference, clean out any stale ones. */
         clear_stale_syncobjs(batch);

         iris_batch_add_syncobj(batch, fine->syncobj, I915_EXEC_FENCE_WAIT);
      }
   }
}

#define NSEC_PER_SEC (1000 * USEC_PER_SEC)
#define USEC_PER_SEC (1000 * MSEC_PER_SEC)
#define MSEC_PER_SEC (1000)

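/* Current CLOCK_MONOTONIC time, in nanoseconds. */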
static uint64_t
gettime_ns(void)
{
   struct timespec current;
   clock_gettime(CLOCK_MONOTONIC, &current);
   return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
}

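/* Convert a relative timeout in nanoseconds to the absolute CLOCK_MONOTONIC
 * value DRM_IOCTL_SYNCOBJ_WAIT expects, clamping so the sum cannot exceed
 * INT64_MAX.
 */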
static uint64_t
rel2abs(uint64_t timeout)
{
   if (timeout == 0)
      return 0;

   uint64_t current_time = gettime_ns();
   uint64_t max_timeout = (uint64_t) INT64_MAX - current_time;

   timeout = MIN2(max_timeout, timeout);

   return current_time + timeout;
}

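/* pipe_screen::fence_finish() implementation: wait up to \p timeout
 * (relative) nanoseconds for every fine fence in \p fence to signal,
 * returning true if they all did.
 */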
static bool
iris_fence_finish(struct pipe_screen *p_screen,
                  struct pipe_context *ctx,
                  struct pipe_fence_handle *fence,
                  uint64_t timeout)
{
   struct iris_context *ice = (struct iris_context *)ctx;
   struct iris_screen *screen = (struct iris_screen *)p_screen;

   /* If we created the fence with PIPE_FLUSH_DEFERRED, we may not have
    * flushed yet.  Check if our syncobj is the current batch's signalling
    * syncobj - if so, we haven't flushed and need to now.
    *
    * The Gallium docs mention that a flush will occur if \p ctx matches
    * the context the fence was created with.  It may be NULL, so we check
    * that it matches first.
    */
   if (ctx && ctx == fence->unflushed_ctx) {
      for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++) {
         struct iris_fine_fence *fine = fence->fine[i];

         if (iris_fine_fence_signaled(fine))
            continue;

         if (fine->syncobj == iris_batch_get_signal_syncobj(&ice->batches[i]))
            iris_batch_flush(&ice->batches[i]);
      }

      /* The fence is no longer deferred. */
      fence->unflushed_ctx = NULL;
   }

   unsigned int handle_count = 0;
   uint32_t handles[ARRAY_SIZE(fence->fine)];
   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
      struct iris_fine_fence *fine = fence->fine[i];

      if (iris_fine_fence_signaled(fine))
         continue;

      handles[handle_count++] = fine->syncobj->handle;
   }

   if (handle_count == 0)
      return true;

   struct drm_syncobj_wait args = {
      .handles = (uintptr_t)handles,
      .count_handles = handle_count,
      .timeout_nsec = rel2abs(timeout),
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL
   };

   if (fence->unflushed_ctx) {
      /* This fence had a deferred flush from another context.  We can't
       * safely flush it here, because the context might be bound to a
       * different thread, and poking at its internals wouldn't be safe.
       *
       * Instead, use the WAIT_FOR_SUBMIT flag to block and hope that
       * another thread submits the work.
       */
      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
   }

   return gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args) == 0;
}

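/* Merge two sync file FDs with SYNC_IOC_MERGE, closing both inputs and
 * returning the combined fence FD.  Either input may be -1, in which case
 * the other is returned as-is.
 */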
static int
sync_merge_fd(int sync_fd, int new_fd)
{
   if (sync_fd == -1)
      return new_fd;

   if (new_fd == -1)
      return sync_fd;

   struct sync_merge_data args = {
      .name = "iris fence",
      .fd2 = new_fd,
      .fence = -1,
   };

   gen_ioctl(sync_fd, SYNC_IOC_MERGE, &args);
   close(new_fd);
   close(sync_fd);

   return args.fence;
}

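/* pipe_screen::fence_get_fd() implementation: export the fence as a sync
 * file FD by exporting each unsignalled syncobj and merging the results.
 */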
static int
iris_fence_get_fd(struct pipe_screen *p_screen,
                  struct pipe_fence_handle *fence)
{
   struct iris_screen *screen = (struct iris_screen *)p_screen;
   int fd = -1;

   /* Deferred fences aren't supported. */
   if (fence->unflushed_ctx)
      return -1;

   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
      struct iris_fine_fence *fine = fence->fine[i];

      if (iris_fine_fence_signaled(fine))
         continue;

      struct drm_syncobj_handle args = {
         .handle = fine->syncobj->handle,
         .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
         .fd = -1,
      };

      gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
      fd = sync_merge_fd(fd, args.fd);
   }

   if (fd == -1) {
      /* Our fence has no syncobjs recorded.  This means that all of the
       * batches had already completed, their syncobjs had been signalled,
       * and so we didn't bother to record them.  But we're being asked to
       * export such a fence.  So export a dummy already-signalled syncobj.
       */
      struct drm_syncobj_handle args = {
         .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE, .fd = -1,
      };

      args.handle = gem_syncobj_create(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED);
      gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
      gem_syncobj_destroy(screen->fd, args.handle);
      return args.fd;
   }

   return fd;
}

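/* pipe_context::create_fence_fd() implementation: import a sync file FD
 * into a new pipe_fence_handle, wrapping it in a fake fine fence that only
 * ever reports completion through its syncobj.
 */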
static void
iris_fence_create_fd(struct pipe_context *ctx,
                     struct pipe_fence_handle **out,
                     int fd,
                     enum pipe_fd_type type)
{
   assert(type == PIPE_FD_TYPE_NATIVE_SYNC);

   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   struct drm_syncobj_handle args = {
      .handle = gem_syncobj_create(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED),
      .flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE,
      .fd = fd,
   };
   if (gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args) == -1) {
      fprintf(stderr, "DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE failed: %s\n",
              strerror(errno));
      gem_syncobj_destroy(screen->fd, args.handle);
      *out = NULL;
      return;
   }

   struct iris_syncobj *syncobj = malloc(sizeof(*syncobj));
   if (!syncobj) {
      *out = NULL;
      return;
   }
   syncobj->handle = args.handle;
   pipe_reference_init(&syncobj->ref, 1);

   struct iris_fine_fence *fine = calloc(1, sizeof(*fine));
   if (!fine) {
      free(syncobj);
      *out = NULL;
      return;
   }

   static const uint32_t zero = 0;

   /* Fences work in terms of iris_fine_fence, but we don't actually have a
    * seqno for an imported fence.  So, create a fake one which always
    * returns as 'not signaled' so we fall back to using the sync object.
    */
   fine->seqno = UINT32_MAX;
   fine->map = &zero;
   fine->syncobj = syncobj;
   fine->flags = IRIS_FENCE_END;
   pipe_reference_init(&fine->reference, 1);

   struct pipe_fence_handle *fence = calloc(1, sizeof(*fence));
   if (!fence) {
      free(fine);
      free(syncobj);
      *out = NULL;
      return;
   }
   pipe_reference_init(&fence->ref, 1);
   fence->fine[0] = fine;

   *out = fence;
}

void
iris_init_screen_fence_functions(struct pipe_screen *screen)
{
   screen->fence_reference = iris_fence_reference;
   screen->fence_finish = iris_fence_finish;
   screen->fence_get_fd = iris_fence_get_fd;
}

void
iris_init_context_fence_functions(struct pipe_context *ctx)
{
   ctx->flush = iris_fence_flush;
   ctx->create_fence_fd = iris_fence_create_fd;
   ctx->fence_server_sync = iris_fence_await;
}