c43c892bb87cee5650bdd212fdd4f6fb60e3d1a9
[mesa.git] / src / gallium / drivers / iris / iris_fence.c
1 /*
2 * Copyright © 2018 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23 /**
24 * @file iris_fence.c
25 *
26 * Fences for driver and IPC serialisation, scheduling and synchronisation.
27 */
28
29 #include <linux/sync_file.h>
30
31 #include "util/u_inlines.h"
32 #include "intel/common/gen_gem.h"
33
34 #include "iris_batch.h"
35 #include "iris_bufmgr.h"
36 #include "iris_context.h"
37 #include "iris_fence.h"
38 #include "iris_screen.h"
39
40 static uint32_t
41 gem_syncobj_create(int fd, uint32_t flags)
42 {
43 struct drm_syncobj_create args = {
44 .flags = flags,
45 };
46
47 gen_ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);
48
49 return args.handle;
50 }
51
52 static void
53 gem_syncobj_destroy(int fd, uint32_t handle)
54 {
55 struct drm_syncobj_destroy args = {
56 .handle = handle,
57 };
58
59 gen_ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
60 }
61
62 /**
63 * Make a new sync-point.
64 */
65 struct iris_syncobj *
66 iris_create_syncobj(struct iris_screen *screen)
67 {
68 struct iris_syncobj *syncobj = malloc(sizeof(*syncobj));
69
70 if (!syncobj)
71 return NULL;
72
73 syncobj->handle = gem_syncobj_create(screen->fd, 0);
74 assert(syncobj->handle);
75
76 pipe_reference_init(&syncobj->ref, 1);
77
78 return syncobj;
79 }
80
81 void
82 iris_syncobj_destroy(struct iris_screen *screen, struct iris_syncobj *syncobj)
83 {
84 gem_syncobj_destroy(screen->fd, syncobj->handle);
85 free(syncobj);
86 }
87
88 /**
89 * Add a sync-point to the batch, with the given flags.
90 *
91 * \p flags One of I915_EXEC_FENCE_WAIT or I915_EXEC_FENCE_SIGNAL.
92 */
93 void
94 iris_batch_add_syncobj(struct iris_batch *batch,
95 struct iris_syncobj *syncobj,
96 unsigned flags)
97 {
98 struct drm_i915_gem_exec_fence *fence =
99 util_dynarray_grow(&batch->exec_fences, struct drm_i915_gem_exec_fence, 1);
100
101 *fence = (struct drm_i915_gem_exec_fence) {
102 .handle = syncobj->handle,
103 .flags = flags,
104 };
105
106 struct iris_syncobj **store =
107 util_dynarray_grow(&batch->syncobjs, struct iris_syncobj *, 1);
108
109 *store = NULL;
110 iris_syncobj_reference(batch->screen, store, syncobj);
111 }
112
113 /* ------------------------------------------------------------------- */
114
/* A refcounted bundle of per-batch sync points: one iris_seqno slot per
 * batch.  Slots left NULL (see iris_fence_flush) mean that batch had
 * already completed when the fence was created.
 */
struct pipe_fence_handle {
   struct pipe_reference ref;
   struct iris_seqno *seqno[IRIS_BATCH_COUNT];
};
119
120 static void
121 iris_fence_destroy(struct pipe_screen *p_screen,
122 struct pipe_fence_handle *fence)
123 {
124 struct iris_screen *screen = (struct iris_screen *)p_screen;
125
126 for (unsigned i = 0; i < ARRAY_SIZE(fence->seqno); i++)
127 iris_seqno_reference(screen, &fence->seqno[i], NULL);
128
129 free(fence);
130 }
131
132 static void
133 iris_fence_reference(struct pipe_screen *p_screen,
134 struct pipe_fence_handle **dst,
135 struct pipe_fence_handle *src)
136 {
137 if (pipe_reference(*dst ? &(*dst)->ref : NULL,
138 src ? &src->ref : NULL))
139 iris_fence_destroy(p_screen, *dst);
140
141 *dst = src;
142 }
143
/* Wait (on the CPU) for \p syncobj to signal, up to \p timeout_nsec.
 *
 * NOTE(review): the return value is the raw ioctl result coerced to
 * bool, i.e. false (0) on success and true on error/timeout — the
 * opposite of what the name suggests.  A NULL syncobj also returns
 * false.  Callers may depend on these semantics; confirm before
 * "fixing" the polarity.
 */
bool
iris_wait_syncobj(struct pipe_screen *p_screen,
                  struct iris_syncobj *syncobj,
                  int64_t timeout_nsec)
{
   if (!syncobj)
      return false;

   struct iris_screen *screen = (struct iris_screen *)p_screen;
   struct drm_syncobj_wait args = {
      .handles = (uintptr_t)&syncobj->handle,
      .count_handles = 1,
      .timeout_nsec = timeout_nsec,
   };
   return gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
}
160
/* ANSI escape sequences used to colorize the frame marker printed by
 * iris_fence_flush's debug output (white-on-blue header, then reset).
 */
#define CSI "\e["
#define BLUE_HEADER CSI "0;97;44m"
#define NORMAL CSI "0m"
164
165 static void
166 iris_fence_flush(struct pipe_context *ctx,
167 struct pipe_fence_handle **out_fence,
168 unsigned flags)
169 {
170 struct iris_screen *screen = (void *) ctx->screen;
171 struct iris_context *ice = (struct iris_context *)ctx;
172
173 if (flags & PIPE_FLUSH_END_OF_FRAME) {
174 ice->frame++;
175
176 if (INTEL_DEBUG & DEBUG_SUBMIT) {
177 fprintf(stderr, "%s ::: FRAME %-10u (ctx %p)%-35c%s\n",
178 (INTEL_DEBUG & DEBUG_COLOR) ? BLUE_HEADER : "",
179 ice->frame, ctx, ' ',
180 (INTEL_DEBUG & DEBUG_COLOR) ? NORMAL : "");
181 }
182 }
183
184 /* XXX PIPE_FLUSH_DEFERRED */
185 for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++)
186 iris_batch_flush(&ice->batches[i]);
187
188 if (!out_fence)
189 return;
190
191 struct pipe_fence_handle *fence = calloc(1, sizeof(*fence));
192 if (!fence)
193 return;
194
195 pipe_reference_init(&fence->ref, 1);
196
197 for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
198 struct iris_batch *batch = &ice->batches[b];
199
200 if (iris_seqno_signaled(batch->last_seqno))
201 continue;
202
203 iris_seqno_reference(screen, &fence->seqno[b], batch->last_seqno);
204 }
205
206 iris_fence_reference(ctx->screen, out_fence, NULL);
207 *out_fence = fence;
208 }
209
210 static void
211 iris_fence_await(struct pipe_context *ctx,
212 struct pipe_fence_handle *fence)
213 {
214 struct iris_context *ice = (struct iris_context *)ctx;
215
216 for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
217 struct iris_batch *batch = &ice->batches[b];
218
219 for (unsigned i = 0; i < ARRAY_SIZE(fence->seqno); i++) {
220 struct iris_seqno *seqno = fence->seqno[i];
221
222 if (iris_seqno_signaled(seqno))
223 continue;
224
225 iris_batch_add_syncobj(batch, seqno->syncobj, I915_EXEC_FENCE_WAIT);
226 }
227 }
228 }
229
/* Time-unit conversions for the monotonic-clock helpers below.  The
 * "forward references" are fine: macros expand at point of use.
 */
#define NSEC_PER_SEC (1000 * USEC_PER_SEC)
#define USEC_PER_SEC (1000 * MSEC_PER_SEC)
#define MSEC_PER_SEC (1000)
233
234 static uint64_t
235 gettime_ns(void)
236 {
237 struct timespec current;
238 clock_gettime(CLOCK_MONOTONIC, &current);
239 return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
240 }
241
/**
 * Convert a relative timeout (ns) into an absolute CLOCK_MONOTONIC
 * deadline, clamped so the addition cannot exceed INT64_MAX.
 * A zero timeout stays zero (i.e. poll, don't block).
 */
static uint64_t
rel2abs(uint64_t timeout)
{
   if (!timeout)
      return 0;

   const uint64_t now = gettime_ns();
   const uint64_t headroom = (uint64_t)INT64_MAX - now;

   return now + MIN2(timeout, headroom);
}
255
256 static bool
257 iris_fence_finish(struct pipe_screen *p_screen,
258 struct pipe_context *ctx,
259 struct pipe_fence_handle *fence,
260 uint64_t timeout)
261 {
262 struct iris_screen *screen = (struct iris_screen *)p_screen;
263
264 unsigned int handle_count = 0;
265 uint32_t handles[ARRAY_SIZE(fence->seqno)];
266 for (unsigned i = 0; i < ARRAY_SIZE(fence->seqno); i++) {
267 struct iris_seqno *seqno = fence->seqno[i];
268
269 if (iris_seqno_signaled(seqno))
270 continue;
271
272 handles[handle_count++] = seqno->syncobj->handle;
273 }
274
275 if (handle_count == 0)
276 return true;
277
278 struct drm_syncobj_wait args = {
279 .handles = (uintptr_t)handles,
280 .count_handles = handle_count,
281 .timeout_nsec = rel2abs(timeout),
282 .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL
283 };
284 return gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args) == 0;
285 }
286
287 static int
288 sync_merge_fd(int sync_fd, int new_fd)
289 {
290 if (sync_fd == -1)
291 return new_fd;
292
293 if (new_fd == -1)
294 return sync_fd;
295
296 struct sync_merge_data args = {
297 .name = "iris fence",
298 .fd2 = new_fd,
299 .fence = -1,
300 };
301
302 gen_ioctl(sync_fd, SYNC_IOC_MERGE, &args);
303 close(new_fd);
304 close(sync_fd);
305
306 return args.fence;
307 }
308
309 static int
310 iris_fence_get_fd(struct pipe_screen *p_screen,
311 struct pipe_fence_handle *fence)
312 {
313 struct iris_screen *screen = (struct iris_screen *)p_screen;
314 int fd = -1;
315
316 for (unsigned i = 0; i < ARRAY_SIZE(fence->seqno); i++) {
317 struct iris_seqno *seqno = fence->seqno[i];
318
319 if (iris_seqno_signaled(seqno))
320 continue;
321
322 struct drm_syncobj_handle args = {
323 .handle = seqno->syncobj->handle,
324 .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
325 .fd = -1,
326 };
327
328 gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
329 fd = sync_merge_fd(fd, args.fd);
330 }
331
332 if (fd == -1) {
333 /* Our fence has no syncobj's recorded. This means that all of the
334 * batches had already completed, their syncobj's had been signalled,
335 * and so we didn't bother to record them. But we're being asked to
336 * export such a fence. So export a dummy already-signalled syncobj.
337 */
338 struct drm_syncobj_handle args = {
339 .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE, .fd = -1,
340 };
341
342 args.handle = gem_syncobj_create(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED);
343 gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
344 gem_syncobj_destroy(screen->fd, args.handle);
345 return args.fd;
346 }
347
348 return fd;
349 }
350
351 static void
352 iris_fence_create_fd(struct pipe_context *ctx,
353 struct pipe_fence_handle **out,
354 int fd,
355 enum pipe_fd_type type)
356 {
357 assert(type == PIPE_FD_TYPE_NATIVE_SYNC);
358
359 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
360 struct drm_syncobj_handle args = {
361 .handle = gem_syncobj_create(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED),
362 .flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE,
363 .fd = fd,
364 };
365 if (gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args) == -1) {
366 fprintf(stderr, "DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE failed: %s\n",
367 strerror(errno));
368 gem_syncobj_destroy(screen->fd, args.handle);
369 *out = NULL;
370 return;
371 }
372
373 struct iris_syncobj *syncobj = malloc(sizeof(*syncobj));
374 if (!syncobj) {
375 *out = NULL;
376 return;
377 }
378 syncobj->handle = args.handle;
379 pipe_reference_init(&syncobj->ref, 1);
380
381 struct iris_seqno *seqno = malloc(sizeof(*seqno));
382 if (!seqno) {
383 free(syncobj);
384 *out = NULL;
385 return;
386 }
387
388 static const uint32_t zero = 0;
389
390 /* Fences work in terms of iris_seqno, but we don't actually have a
391 * seqno for an imported fence. So, create a fake one which always
392 * returns as 'not signaled' so we fall back to using the sync object.
393 */
394 seqno->seqno = UINT32_MAX;
395 seqno->map = &zero;
396 seqno->syncobj = syncobj;
397 seqno->flags = IRIS_SEQNO_END;
398 pipe_reference_init(&seqno->reference, 1);
399
400 struct pipe_fence_handle *fence = calloc(1, sizeof(*fence));
401 if (!fence) {
402 free(seqno);
403 free(syncobj);
404 *out = NULL;
405 return;
406 }
407 pipe_reference_init(&fence->ref, 1);
408 fence->seqno[0] = seqno;
409
410 *out = fence;
411 }
412
413 void
414 iris_init_screen_fence_functions(struct pipe_screen *screen)
415 {
416 screen->fence_reference = iris_fence_reference;
417 screen->fence_finish = iris_fence_finish;
418 screen->fence_get_fd = iris_fence_get_fd;
419 }
420
421 void
422 iris_init_context_fence_functions(struct pipe_context *ctx)
423 {
424 ctx->flush = iris_fence_flush;
425 ctx->create_fence_fd = iris_fence_create_fd;
426 ctx->fence_server_sync = iris_fence_await;
427 }