/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_fence.c
 *
 * Fences for driver and IPC serialisation, scheduling and synchronisation.
 */

#include <linux/sync_file.h>

#include "util/u_debug.h"
#include "util/u_inlines.h"
#include "intel/common/gen_gem.h"

#include "iris_batch.h"
#include "iris_bufmgr.h"
#include "iris_context.h"
#include "iris_fence.h"
#include "iris_screen.h"

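/**
 * Wrapper for DRM_IOCTL_SYNCOBJ_CREATE: ask the kernel for a new syncobj
 * handle with the given creation flags (e.g. DRM_SYNCOBJ_CREATE_SIGNALED).
 */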
static uint32_t
gem_syncobj_create(int fd, uint32_t flags)
{
   struct drm_syncobj_create args = {
      .flags = flags,
   };

   gen_ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);

   return args.handle;
}

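/**
 * Wrapper for DRM_IOCTL_SYNCOBJ_DESTROY: release a syncobj handle.
 */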
static void
gem_syncobj_destroy(int fd, uint32_t handle)
{
   struct drm_syncobj_destroy args = {
      .handle = handle,
   };

   gen_ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
}

/**
 * Make a new sync-point.
 */
struct iris_syncobj *
iris_create_syncobj(struct iris_screen *screen)
{
   struct iris_syncobj *syncobj = malloc(sizeof(*syncobj));

   if (!syncobj)
      return NULL;

   syncobj->handle = gem_syncobj_create(screen->fd, 0);
   assert(syncobj->handle);

   pipe_reference_init(&syncobj->ref, 1);

   return syncobj;
}

void
iris_syncobj_destroy(struct iris_screen *screen, struct iris_syncobj *syncobj)
{
   gem_syncobj_destroy(screen->fd, syncobj->handle);
   free(syncobj);
}

/**
 * Add a sync-point to the batch, with the given flags.
 *
 * \p flags One of I915_EXEC_FENCE_WAIT or I915_EXEC_FENCE_SIGNAL.
 */
void
iris_batch_add_syncobj(struct iris_batch *batch,
                       struct iris_syncobj *syncobj,
                       unsigned flags)
{
   struct drm_i915_gem_exec_fence *fence =
      util_dynarray_grow(&batch->exec_fences, struct drm_i915_gem_exec_fence, 1);

   *fence = (struct drm_i915_gem_exec_fence) {
      .handle = syncobj->handle,
      .flags = flags,
   };

   struct iris_syncobj **store =
      util_dynarray_grow(&batch->syncobjs, struct iris_syncobj *, 1);

   *store = NULL;
   iris_syncobj_reference(batch->screen, store, syncobj);
}

/* ------------------------------------------------------------------- */

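/**
 * A Gallium fence. It holds a reference to one fine-grained fence per
 * batch; unflushed_ctx is non-NULL while the fence is still deferred
 * (created with PIPE_FLUSH_DEFERRED and not yet flushed).
 */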
struct pipe_fence_handle {
   struct pipe_reference ref;

   struct pipe_context *unflushed_ctx;

   struct iris_fine_fence *fine[IRIS_BATCH_COUNT];
};

static void
iris_fence_destroy(struct pipe_screen *p_screen,
                   struct pipe_fence_handle *fence)
{
   struct iris_screen *screen = (struct iris_screen *)p_screen;

   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++)
      iris_fine_fence_reference(screen, &fence->fine[i], NULL);

   free(fence);
}

static void
iris_fence_reference(struct pipe_screen *p_screen,
                     struct pipe_fence_handle **dst,
                     struct pipe_fence_handle *src)
{
   if (pipe_reference(*dst ? &(*dst)->ref : NULL,
                      src ? &src->ref : NULL))
      iris_fence_destroy(p_screen, *dst);

   *dst = src;
}

bool
iris_wait_syncobj(struct pipe_screen *p_screen,
                  struct iris_syncobj *syncobj,
                  int64_t timeout_nsec)
{
   if (!syncobj)
      return false;

   struct iris_screen *screen = (struct iris_screen *)p_screen;
   struct drm_syncobj_wait args = {
      .handles = (uintptr_t)&syncobj->handle,
      .count_handles = 1,
      .timeout_nsec = timeout_nsec,
   };
   return gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
}

#define CSI "\e["
#define BLUE_HEADER CSI "0;97;44m"
#define NORMAL CSI "0m"

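/**
 * The pipe_context::flush implementation.
 *
 * Flushes all batches (unless the flush is deferred) and, if the caller
 * asked for one, builds a fence out of one fine-grained fence per batch.
 */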
static void
iris_fence_flush(struct pipe_context *ctx,
                 struct pipe_fence_handle **out_fence,
                 unsigned flags)
{
   struct iris_screen *screen = (void *) ctx->screen;
   struct iris_context *ice = (struct iris_context *)ctx;

   /* We require DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (kernel 5.2+) for
    * deferred flushes. Just ignore the request to defer on older kernels.
    */
   if (!(screen->kernel_features & KERNEL_HAS_WAIT_FOR_SUBMIT))
      flags &= ~PIPE_FLUSH_DEFERRED;

   const bool deferred = flags & PIPE_FLUSH_DEFERRED;

   if (flags & PIPE_FLUSH_END_OF_FRAME) {
      ice->frame++;

      if (INTEL_DEBUG & DEBUG_SUBMIT) {
         fprintf(stderr, "%s ::: FRAME %-10u (ctx %p)%-35c%s\n",
                 (INTEL_DEBUG & DEBUG_COLOR) ? BLUE_HEADER : "",
                 ice->frame, ctx, ' ',
                 (INTEL_DEBUG & DEBUG_COLOR) ? NORMAL : "");
      }
   }

   if (!deferred) {
      for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++)
         iris_batch_flush(&ice->batches[i]);
   }

   if (!out_fence)
      return;

   struct pipe_fence_handle *fence = calloc(1, sizeof(*fence));
   if (!fence)
      return;

   pipe_reference_init(&fence->ref, 1);

   if (deferred)
      fence->unflushed_ctx = ctx;

   for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
      struct iris_batch *batch = &ice->batches[b];

      if (deferred && iris_batch_bytes_used(batch) > 0) {
         struct iris_fine_fence *fine =
            iris_fine_fence_new(batch, IRIS_FENCE_BOTTOM_OF_PIPE);
         iris_fine_fence_reference(screen, &fence->fine[b], fine);
         iris_fine_fence_reference(screen, &fine, NULL);
      } else {
         /* This batch has no commands queued up (perhaps we just flushed,
          * or all the commands are on the other batch). Wait for the last
          * syncobj on this engine - unless it's already finished by now.
          */
         if (iris_fine_fence_signaled(batch->last_fence))
            continue;

         iris_fine_fence_reference(screen, &fence->fine[b], batch->last_fence);
      }
   }

   iris_fence_reference(ctx->screen, out_fence, NULL);
   *out_fence = fence;
}

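/**
 * The pipe_context::fence_server_sync implementation: make any work
 * submitted to this context after this call wait on \p fence's syncobjs.
 */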
static void
iris_fence_await(struct pipe_context *ctx,
                 struct pipe_fence_handle *fence)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   /* Unflushed fences from the same context are no-ops. */
   if (ctx && ctx == fence->unflushed_ctx)
      return;

   /* XXX: We can't safely flush the other context, because it might be
    *      bound to another thread, and poking at its internals wouldn't
    *      be safe. In the future we should use MI_SEMAPHORE_WAIT and
    *      block until the other job has been submitted, relying on
    *      kernel timeslicing to preempt us until the other job is
    *      actually flushed and the seqno finally passes.
    */
   if (fence->unflushed_ctx) {
      pipe_debug_message(&ice->dbg, CONFORMANCE, "%s",
                         "glWaitSync on unflushed fence from another context "
                         "is unlikely to work without kernel 5.8+\n");
   }

   /* Flush any current work in our context as it doesn't need to wait
    * for this fence. Any future work in our context must wait.
    */
   for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
      struct iris_batch *batch = &ice->batches[b];

      for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
         struct iris_fine_fence *fine = fence->fine[i];

         if (iris_fine_fence_signaled(fine))
            continue;

         iris_batch_flush(batch);
         iris_batch_add_syncobj(batch, fine->syncobj, I915_EXEC_FENCE_WAIT);
      }
   }
}

#define NSEC_PER_SEC (1000 * USEC_PER_SEC)
#define USEC_PER_SEC (1000 * MSEC_PER_SEC)
#define MSEC_PER_SEC (1000)

static uint64_t
gettime_ns(void)
{
   struct timespec current;
   clock_gettime(CLOCK_MONOTONIC, &current);
   return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
}

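/**
 * Convert a relative timeout in nanoseconds to the absolute CLOCK_MONOTONIC
 * time expected by DRM_IOCTL_SYNCOBJ_WAIT, clamping so the addition can't
 * overflow INT64_MAX.
 */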
static uint64_t
rel2abs(uint64_t timeout)
{
   if (timeout == 0)
      return 0;

   uint64_t current_time = gettime_ns();
   uint64_t max_timeout = (uint64_t) INT64_MAX - current_time;

   timeout = MIN2(max_timeout, timeout);

   return current_time + timeout;
}

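/**
 * The pipe_screen::fence_finish implementation: wait up to \p timeout
 * nanoseconds for every not-yet-signalled fine fence in \p fence.
 */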
static bool
iris_fence_finish(struct pipe_screen *p_screen,
                  struct pipe_context *ctx,
                  struct pipe_fence_handle *fence,
                  uint64_t timeout)
{
   struct iris_context *ice = (struct iris_context *)ctx;
   struct iris_screen *screen = (struct iris_screen *)p_screen;

   /* If we created the fence with PIPE_FLUSH_DEFERRED, we may not have
    * flushed yet. Check if our syncobj is the current batch's signalling
    * syncobj - if so, we haven't flushed and need to now.
    *
    * The Gallium docs mention that a flush will occur if \p ctx matches
    * the context the fence was created with. It may be NULL, so we check
    * that it matches first.
    */
   if (ctx && ctx == fence->unflushed_ctx) {
      for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++) {
         struct iris_fine_fence *fine = fence->fine[i];

         if (iris_fine_fence_signaled(fine))
            continue;

         if (fine->syncobj == iris_batch_get_signal_syncobj(&ice->batches[i]))
            iris_batch_flush(&ice->batches[i]);
      }

      /* The fence is no longer deferred. */
      fence->unflushed_ctx = NULL;
   }

   unsigned int handle_count = 0;
   uint32_t handles[ARRAY_SIZE(fence->fine)];
   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
      struct iris_fine_fence *fine = fence->fine[i];

      if (iris_fine_fence_signaled(fine))
         continue;

      handles[handle_count++] = fine->syncobj->handle;
   }

   if (handle_count == 0)
      return true;

   struct drm_syncobj_wait args = {
      .handles = (uintptr_t)handles,
      .count_handles = handle_count,
      .timeout_nsec = rel2abs(timeout),
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL
   };

   if (fence->unflushed_ctx) {
      /* This fence had a deferred flush from another context. We can't
       * safely flush it here, because the context might be bound to a
       * different thread, and poking at its internals wouldn't be safe.
       *
       * Instead, use the WAIT_FOR_SUBMIT flag to block and hope that
       * another thread submits the work.
       */
      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
   }

   return gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args) == 0;
}

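/**
 * Merge two sync_file fds into one with SYNC_IOC_MERGE, closing both
 * inputs. Either fd may be -1, in which case the other is returned
 * unchanged.
 */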
static int
sync_merge_fd(int sync_fd, int new_fd)
{
   if (sync_fd == -1)
      return new_fd;

   if (new_fd == -1)
      return sync_fd;

   struct sync_merge_data args = {
      .name = "iris fence",
      .fd2 = new_fd,
      .fence = -1,
   };

   gen_ioctl(sync_fd, SYNC_IOC_MERGE, &args);
   close(new_fd);
   close(sync_fd);

   return args.fence;
}

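/**
 * The pipe_screen::fence_get_fd implementation: export \p fence as a
 * sync_file fd by merging the sync_files of its unsignalled syncobjs.
 */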
static int
iris_fence_get_fd(struct pipe_screen *p_screen,
                  struct pipe_fence_handle *fence)
{
   struct iris_screen *screen = (struct iris_screen *)p_screen;
   int fd = -1;

   /* Deferred fences aren't supported. */
   if (fence->unflushed_ctx)
      return -1;

   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
      struct iris_fine_fence *fine = fence->fine[i];

      if (iris_fine_fence_signaled(fine))
         continue;

      struct drm_syncobj_handle args = {
         .handle = fine->syncobj->handle,
         .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
         .fd = -1,
      };

      gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
      fd = sync_merge_fd(fd, args.fd);
   }

   if (fd == -1) {
      /* Our fence has no syncobjs recorded. This means that all of the
       * batches had already completed, their syncobjs had been signalled,
       * and so we didn't bother to record them. But we're being asked to
       * export such a fence. So export a dummy already-signalled syncobj.
       */
      struct drm_syncobj_handle args = {
         .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE, .fd = -1,
      };

      args.handle = gem_syncobj_create(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED);
      gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
      gem_syncobj_destroy(screen->fd, args.handle);
      return args.fd;
   }

   return fd;
}

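/**
 * The pipe_context::create_fence_fd implementation: import a sync_file fd
 * into a new fence, wrapping it in a fine fence whose seqno never signals
 * so waits always fall back to the imported syncobj.
 */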
static void
iris_fence_create_fd(struct pipe_context *ctx,
                     struct pipe_fence_handle **out,
                     int fd,
                     enum pipe_fd_type type)
{
   assert(type == PIPE_FD_TYPE_NATIVE_SYNC);

   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   struct drm_syncobj_handle args = {
      .handle = gem_syncobj_create(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED),
      .flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE,
      .fd = fd,
   };
   if (gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args) == -1) {
      fprintf(stderr, "DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE failed: %s\n",
              strerror(errno));
      gem_syncobj_destroy(screen->fd, args.handle);
      *out = NULL;
      return;
   }

   struct iris_syncobj *syncobj = malloc(sizeof(*syncobj));
   if (!syncobj) {
      *out = NULL;
      return;
   }
   syncobj->handle = args.handle;
   pipe_reference_init(&syncobj->ref, 1);

   struct iris_fine_fence *fine = calloc(1, sizeof(*fine));
   if (!fine) {
      free(syncobj);
      *out = NULL;
      return;
   }

   static const uint32_t zero = 0;

   /* Fences work in terms of iris_fine_fence, but we don't actually have a
    * seqno for an imported fence. So, create a fake one which always
    * returns as 'not signaled' so we fall back to using the sync object.
    */
   fine->seqno = UINT32_MAX;
   fine->map = &zero;
   fine->syncobj = syncobj;
   fine->flags = IRIS_FENCE_END;
   pipe_reference_init(&fine->reference, 1);

   struct pipe_fence_handle *fence = calloc(1, sizeof(*fence));
   if (!fence) {
      free(fine);
      free(syncobj);
      *out = NULL;
      return;
   }
   pipe_reference_init(&fence->ref, 1);
   fence->fine[0] = fine;

   *out = fence;
}

void
iris_init_screen_fence_functions(struct pipe_screen *screen)
{
   screen->fence_reference = iris_fence_reference;
   screen->fence_finish = iris_fence_finish;
   screen->fence_get_fd = iris_fence_get_fd;
}

void
iris_init_context_fence_functions(struct pipe_context *ctx)
{
   ctx->flush = iris_fence_flush;
   ctx->create_fence_fd = iris_fence_create_fd;
   ctx->fence_server_sync = iris_fence_await;
}