/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_fence.c
 *
 * Fences for driver and IPC serialisation, scheduling and synchronisation.
 */
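/*
 * A pipe_fence_handle wraps up to one iris_fine_fence per batch
 * (IRIS_BATCH_COUNT of them); each fine fence is in turn backed by a DRM
 * syncobj.  State trackers only see the Gallium hooks installed at the
 * bottom of this file.  A rough, illustrative sketch of usage from the API
 * side (these are generic Gallium entry points, not functions defined here):
 *
 *    struct pipe_fence_handle *fence = NULL;
 *    ctx->flush(ctx, &fence, PIPE_FLUSH_END_OF_FRAME);
 *    screen->fence_finish(screen, NULL, fence, PIPE_TIMEOUT_INFINITE);
 *    screen->fence_reference(screen, &fence, NULL);
 */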

#include "drm-uapi/sync_file.h"
#include "util/u_debug.h"
#include "util/u_inlines.h"
#include "intel/common/gen_gem.h"

#include "iris_batch.h"
#include "iris_bufmgr.h"
#include "iris_context.h"
#include "iris_fence.h"
#include "iris_screen.h"

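/* Thin wrappers around the DRM syncobj create/destroy ioctls.  The ioctl
 * results are not checked here; iris_create_syncobj() asserts on the
 * returned handle instead.
 */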
static uint32_t
gem_syncobj_create(int fd, uint32_t flags)
{
   struct drm_syncobj_create args = {
      .flags = flags,
   };

   gen_ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);

   return args.handle;
}

static void
gem_syncobj_destroy(int fd, uint32_t handle)
{
   struct drm_syncobj_destroy args = {
      .handle = handle,
   };

   gen_ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
}

/**
 * Make a new sync-point.
 */
struct iris_syncobj *
iris_create_syncobj(struct iris_screen *screen)
{
   struct iris_syncobj *syncobj = malloc(sizeof(*syncobj));

   if (!syncobj)
      return NULL;

   syncobj->handle = gem_syncobj_create(screen->fd, 0);
   assert(syncobj->handle);

   pipe_reference_init(&syncobj->ref, 1);

   return syncobj;
}

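/**
 * Destroy a sync-point, releasing its kernel handle.
 */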
void
iris_syncobj_destroy(struct iris_screen *screen, struct iris_syncobj *syncobj)
{
   gem_syncobj_destroy(screen->fd, syncobj->handle);
   free(syncobj);
}

/**
 * Add a sync-point to the batch, with the given flags.
 *
 * \p flags   One of I915_EXEC_FENCE_WAIT or I915_EXEC_FENCE_SIGNAL.
 */
void
iris_batch_add_syncobj(struct iris_batch *batch,
                       struct iris_syncobj *syncobj,
                       unsigned flags)
{
   struct drm_i915_gem_exec_fence *fence =
      util_dynarray_grow(&batch->exec_fences, struct drm_i915_gem_exec_fence, 1);

   *fence = (struct drm_i915_gem_exec_fence) {
      .handle = syncobj->handle,
      .flags = flags,
   };

   struct iris_syncobj **store =
      util_dynarray_grow(&batch->syncobjs, struct iris_syncobj *, 1);

   *store = NULL;
   iris_syncobj_reference(batch->screen, store, syncobj);
}

/* ------------------------------------------------------------------- */

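/* A Gallium fence.  Each slot of 'fine' holds a reference to the fine fence
 * covering the corresponding batch's work, or NULL if that batch had nothing
 * outstanding when the fence was created.  'unflushed_ctx' is only non-NULL
 * while the fence was created with PIPE_FLUSH_DEFERRED and its context has
 * not flushed the work yet.
 */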
struct pipe_fence_handle {
   struct pipe_reference ref;

   struct pipe_context *unflushed_ctx;

   struct iris_fine_fence *fine[IRIS_BATCH_COUNT];
};

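/* Release the references to the per-batch fine fences and free the fence. */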
static void
iris_fence_destroy(struct pipe_screen *p_screen,
                   struct pipe_fence_handle *fence)
{
   struct iris_screen *screen = (struct iris_screen *)p_screen;

   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++)
      iris_fine_fence_reference(screen, &fence->fine[i], NULL);

   free(fence);
}

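/* pipe_screen::fence_reference: make *dst point at src, destroying the old
 * fence when its last reference goes away.
 */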
static void
iris_fence_reference(struct pipe_screen *p_screen,
                     struct pipe_fence_handle **dst,
                     struct pipe_fence_handle *src)
{
   if (pipe_reference(*dst ? &(*dst)->ref : NULL,
                      src ? &src->ref : NULL))
      iris_fence_destroy(p_screen, *dst);

   *dst = src;
}

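/* Wait on a single syncobj using DRM_IOCTL_SYNCOBJ_WAIT, passing
 * timeout_nsec through to the kernel and returning the raw gen_ioctl()
 * result as a bool.
 */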
bool
iris_wait_syncobj(struct pipe_screen *p_screen,
                  struct iris_syncobj *syncobj,
                  int64_t timeout_nsec)
{
   if (!syncobj)
      return false;

   struct iris_screen *screen = (struct iris_screen *)p_screen;
   struct drm_syncobj_wait args = {
      .handles = (uintptr_t)&syncobj->handle,
      .count_handles = 1,
      .timeout_nsec = timeout_nsec,
   };
   return gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
}

#define CSI "\e["
#define BLUE_HEADER  CSI "0;97;44m"
#define NORMAL       CSI "0m"

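/* pipe_context::flush implementation.  Unless the flush is deferred (and
 * the kernel supports WAIT_FOR_SUBMIT), every batch is flushed.  If the
 * caller wants a fence back, one is built holding a fine fence for each
 * batch that still has unsignaled work.
 */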
static void
iris_fence_flush(struct pipe_context *ctx,
                 struct pipe_fence_handle **out_fence,
                 unsigned flags)
{
   struct iris_screen *screen = (void *) ctx->screen;
   struct iris_context *ice = (struct iris_context *)ctx;

   /* We require DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (kernel 5.2+) for
    * deferred flushes.  Just ignore the request to defer on older kernels.
    */
   if (!(screen->kernel_features & KERNEL_HAS_WAIT_FOR_SUBMIT))
      flags &= ~PIPE_FLUSH_DEFERRED;

   const bool deferred = flags & PIPE_FLUSH_DEFERRED;

   if (flags & PIPE_FLUSH_END_OF_FRAME) {
      ice->frame++;

      if (INTEL_DEBUG & DEBUG_SUBMIT) {
         fprintf(stderr, "%s ::: FRAME %-10u (ctx %p)%-35c%s\n",
                 (INTEL_DEBUG & DEBUG_COLOR) ? BLUE_HEADER : "",
                 ice->frame, ctx, ' ',
                 (INTEL_DEBUG & DEBUG_COLOR) ? NORMAL : "");
      }
   }

   if (!deferred) {
      for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++)
         iris_batch_flush(&ice->batches[i]);
   }

   if (!out_fence)
      return;

   struct pipe_fence_handle *fence = calloc(1, sizeof(*fence));
   if (!fence)
      return;

   pipe_reference_init(&fence->ref, 1);

   if (deferred)
      fence->unflushed_ctx = ctx;

   for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
      struct iris_batch *batch = &ice->batches[b];

      if (deferred && iris_batch_bytes_used(batch) > 0) {
         struct iris_fine_fence *fine =
            iris_fine_fence_new(batch, IRIS_FENCE_BOTTOM_OF_PIPE);
         iris_fine_fence_reference(screen, &fence->fine[b], fine);
         iris_fine_fence_reference(screen, &fine, NULL);
      } else {
         /* This batch has no commands queued up (perhaps we just flushed,
          * or all the commands are on the other batch).  Wait for the last
          * syncobj on this engine - unless it's already finished by now.
          */
         if (iris_fine_fence_signaled(batch->last_fence))
            continue;

         iris_fine_fence_reference(screen, &fence->fine[b], batch->last_fence);
      }
   }

   iris_fence_reference(ctx->screen, out_fence, NULL);
   *out_fence = fence;
}

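/* pipe_context::fence_server_sync implementation: make all future work
 * submitted by this context wait, on the GPU side, for the given fence's
 * syncobjs via I915_EXEC_FENCE_WAIT.
 */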
static void
iris_fence_await(struct pipe_context *ctx,
                 struct pipe_fence_handle *fence)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   /* Unflushed fences from the same context are no-ops. */
   if (ctx && ctx == fence->unflushed_ctx)
      return;

   /* XXX: We can't safely flush the other context, because it might be
    *      bound to another thread, and poking at its internals wouldn't
    *      be safe.  In the future we should use MI_SEMAPHORE_WAIT and
    *      block until the other job has been submitted, relying on
    *      kernel timeslicing to preempt us until the other job is
    *      actually flushed and the seqno finally passes.
    */
   if (fence->unflushed_ctx) {
      pipe_debug_message(&ice->dbg, CONFORMANCE, "%s",
                         "glWaitSync on unflushed fence from another context "
                         "is unlikely to work without kernel 5.8+\n");
   }

   /* Flush any current work in our context as it doesn't need to wait
    * for this fence.  Any future work in our context must wait.
    */
   for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
      struct iris_batch *batch = &ice->batches[b];

      for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
         struct iris_fine_fence *fine = fence->fine[i];

         if (iris_fine_fence_signaled(fine))
            continue;

         iris_batch_flush(batch);
         iris_batch_add_syncobj(batch, fine->syncobj, I915_EXEC_FENCE_WAIT);
      }
   }
}

#define NSEC_PER_SEC (1000 * USEC_PER_SEC)
#define USEC_PER_SEC (1000 * MSEC_PER_SEC)
#define MSEC_PER_SEC (1000)

static uint64_t
gettime_ns(void)
{
   struct timespec current;
   clock_gettime(CLOCK_MONOTONIC, &current);
   return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
}

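/* Convert the relative timeout used by the Gallium interface into the
 * absolute CLOCK_MONOTONIC deadline that DRM_IOCTL_SYNCOBJ_WAIT expects,
 * clamping so the addition cannot overflow INT64_MAX.  A zero timeout stays
 * zero, meaning "poll without waiting".
 */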
static uint64_t
rel2abs(uint64_t timeout)
{
   if (timeout == 0)
      return 0;

   uint64_t current_time = gettime_ns();
   uint64_t max_timeout = (uint64_t) INT64_MAX - current_time;

   timeout = MIN2(max_timeout, timeout);

   return current_time + timeout;
}

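/* pipe_screen::fence_finish implementation: if the fence was deferred and
 * still points at one of our own unflushed batches, flush it first, then
 * wait on all remaining unsignaled syncobjs with a single
 * DRM_IOCTL_SYNCOBJ_WAIT call.
 */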
static bool
iris_fence_finish(struct pipe_screen *p_screen,
                  struct pipe_context *ctx,
                  struct pipe_fence_handle *fence,
                  uint64_t timeout)
{
   struct iris_context *ice = (struct iris_context *)ctx;
   struct iris_screen *screen = (struct iris_screen *)p_screen;

   /* If we created the fence with PIPE_FLUSH_DEFERRED, we may not have
    * flushed yet.  Check if our syncobj is the current batch's signalling
    * syncobj - if so, we haven't flushed and need to now.
    *
    * The Gallium docs mention that a flush will occur if \p ctx matches
    * the context the fence was created with.  It may be NULL, so we check
    * that it matches first.
    */
   if (ctx && ctx == fence->unflushed_ctx) {
      for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++) {
         struct iris_fine_fence *fine = fence->fine[i];

         if (iris_fine_fence_signaled(fine))
            continue;

         if (fine->syncobj == iris_batch_get_signal_syncobj(&ice->batches[i]))
            iris_batch_flush(&ice->batches[i]);
      }

      /* The fence is no longer deferred. */
      fence->unflushed_ctx = NULL;
   }

   unsigned int handle_count = 0;
   uint32_t handles[ARRAY_SIZE(fence->fine)];
   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
      struct iris_fine_fence *fine = fence->fine[i];

      if (iris_fine_fence_signaled(fine))
         continue;

      handles[handle_count++] = fine->syncobj->handle;
   }

   if (handle_count == 0)
      return true;

   struct drm_syncobj_wait args = {
      .handles = (uintptr_t)handles,
      .count_handles = handle_count,
      .timeout_nsec = rel2abs(timeout),
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL
   };

   if (fence->unflushed_ctx) {
      /* This fence had a deferred flush from another context.  We can't
       * safely flush it here, because the context might be bound to a
       * different thread, and poking at its internals wouldn't be safe.
       *
       * Instead, use the WAIT_FOR_SUBMIT flag to block and hope that
       * another thread submits the work.
       */
      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
   }

   return gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args) == 0;
}

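/* Merge two sync-file fds into one via SYNC_IOC_MERGE, closing both inputs.
 * Either input may be -1, in which case the other fd is returned unchanged.
 */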
static int
sync_merge_fd(int sync_fd, int new_fd)
{
   if (sync_fd == -1)
      return new_fd;

   if (new_fd == -1)
      return sync_fd;

   struct sync_merge_data args = {
      .name = "iris fence",
      .fd2 = new_fd,
      .fence = -1,
   };

   gen_ioctl(sync_fd, SYNC_IOC_MERGE, &args);
   close(new_fd);
   close(sync_fd);

   return args.fence;
}

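/* pipe_screen::fence_get_fd implementation: export the fence as a sync-file
 * fd by exporting each pending syncobj
 * (DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE) and merging the results.
 * If nothing is pending, a dummy already-signalled syncobj is exported
 * instead.  The resulting fd can later be imported with
 * pipe_context::create_fence_fd.
 */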
static int
iris_fence_get_fd(struct pipe_screen *p_screen,
                  struct pipe_fence_handle *fence)
{
   struct iris_screen *screen = (struct iris_screen *)p_screen;
   int fd = -1;

   /* Deferred fences aren't supported. */
   if (fence->unflushed_ctx)
      return -1;

   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
      struct iris_fine_fence *fine = fence->fine[i];

      if (iris_fine_fence_signaled(fine))
         continue;

      struct drm_syncobj_handle args = {
         .handle = fine->syncobj->handle,
         .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
         .fd = -1,
      };

      gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
      fd = sync_merge_fd(fd, args.fd);
   }

   if (fd == -1) {
      /* Our fence has no syncobjs recorded.  This means that all of the
       * batches had already completed, their syncobjs had been signalled,
       * and so we didn't bother to record them.  But we're being asked to
       * export such a fence.  So export a dummy already-signalled syncobj.
       */
      struct drm_syncobj_handle args = {
         .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE, .fd = -1,
      };

      args.handle = gem_syncobj_create(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED);
      gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
      gem_syncobj_destroy(screen->fd, args.handle);
      return args.fd;
   }

   return fd;
}

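/* pipe_context::create_fence_fd implementation: import a sync-file fd into
 * a new syncobj and wrap it in a single fake fine fence whose seqno never
 * reads as signaled, so every check falls back to the syncobj itself.
 */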
static void
iris_fence_create_fd(struct pipe_context *ctx,
                     struct pipe_fence_handle **out,
                     int fd,
                     enum pipe_fd_type type)
{
   assert(type == PIPE_FD_TYPE_NATIVE_SYNC);

   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   struct drm_syncobj_handle args = {
      .handle = gem_syncobj_create(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED),
      .flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE,
      .fd = fd,
   };
   if (gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args) == -1) {
      fprintf(stderr, "DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE failed: %s\n",
              strerror(errno));
      gem_syncobj_destroy(screen->fd, args.handle);
      *out = NULL;
      return;
   }

   struct iris_syncobj *syncobj = malloc(sizeof(*syncobj));
   if (!syncobj) {
      *out = NULL;
      return;
   }
   syncobj->handle = args.handle;
   pipe_reference_init(&syncobj->ref, 1);

   struct iris_fine_fence *fine = calloc(1, sizeof(*fine));
   if (!fine) {
      free(syncobj);
      *out = NULL;
      return;
   }

   static const uint32_t zero = 0;

   /* Fences work in terms of iris_fine_fence, but we don't actually have a
    * seqno for an imported fence.  So, create a fake one which always
    * returns as 'not signaled' so we fall back to using the sync object.
    */
   fine->seqno = UINT32_MAX;
   fine->map = &zero;
   fine->syncobj = syncobj;
   fine->flags = IRIS_FENCE_END;
   pipe_reference_init(&fine->reference, 1);

   struct pipe_fence_handle *fence = calloc(1, sizeof(*fence));
   if (!fence) {
      free(fine);
      free(syncobj);
      *out = NULL;
      return;
   }
   pipe_reference_init(&fence->ref, 1);
   fence->fine[0] = fine;

   *out = fence;
}

void
iris_init_screen_fence_functions(struct pipe_screen *screen)
{
   screen->fence_reference = iris_fence_reference;
   screen->fence_finish = iris_fence_finish;
   screen->fence_get_fd = iris_fence_get_fd;
}

void
iris_init_context_fence_functions(struct pipe_context *ctx)
{
   ctx->flush = iris_fence_flush;
   ctx->create_fence_fd = iris_fence_create_fd;
   ctx->fence_server_sync = iris_fence_await;
}