/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_fence.c
 *
 * Fences for driver and IPC serialisation, scheduling and synchronisation.
 */
29 #include <linux/sync_file.h>
31 #include "util/u_inlines.h"
32 #include "intel/common/gen_gem.h"
34 #include "iris_batch.h"
35 #include "iris_bufmgr.h"
36 #include "iris_context.h"
37 #include "iris_fence.h"
38 #include "iris_screen.h"
41 gem_syncobj_create(int fd
, uint32_t flags
)
43 struct drm_syncobj_create args
= {
47 gen_ioctl(fd
, DRM_IOCTL_SYNCOBJ_CREATE
, &args
);
53 gem_syncobj_destroy(int fd
, uint32_t handle
)
55 struct drm_syncobj_destroy args
= {
59 gen_ioctl(fd
, DRM_IOCTL_SYNCOBJ_DESTROY
, &args
);
63 * Make a new sync-point.
66 iris_create_syncobj(struct iris_screen
*screen
)
68 struct iris_syncobj
*syncobj
= malloc(sizeof(*syncobj
));
73 syncobj
->handle
= gem_syncobj_create(screen
->fd
, 0);
74 assert(syncobj
->handle
);
76 pipe_reference_init(&syncobj
->ref
, 1);
82 iris_syncobj_destroy(struct iris_screen
*screen
, struct iris_syncobj
*syncobj
)
84 gem_syncobj_destroy(screen
->fd
, syncobj
->handle
);
89 * Add a sync-point to the batch, with the given flags.
91 * \p flags One of I915_EXEC_FENCE_WAIT or I915_EXEC_FENCE_SIGNAL.
94 iris_batch_add_syncobj(struct iris_batch
*batch
,
95 struct iris_syncobj
*syncobj
,
98 struct drm_i915_gem_exec_fence
*fence
=
99 util_dynarray_grow(&batch
->exec_fences
, struct drm_i915_gem_exec_fence
, 1);
101 *fence
= (struct drm_i915_gem_exec_fence
) {
102 .handle
= syncobj
->handle
,
106 struct iris_syncobj
**store
=
107 util_dynarray_grow(&batch
->syncobjs
, struct iris_syncobj
*, 1);
110 iris_syncobj_reference(batch
->screen
, store
, syncobj
);
113 /* ------------------------------------------------------------------- */
115 struct pipe_fence_handle
{
116 struct pipe_reference ref
;
117 struct iris_seqno
*seqno
[IRIS_BATCH_COUNT
];
121 iris_fence_destroy(struct pipe_screen
*p_screen
,
122 struct pipe_fence_handle
*fence
)
124 struct iris_screen
*screen
= (struct iris_screen
*)p_screen
;
126 for (unsigned i
= 0; i
< ARRAY_SIZE(fence
->seqno
); i
++)
127 iris_seqno_reference(screen
, &fence
->seqno
[i
], NULL
);
133 iris_fence_reference(struct pipe_screen
*p_screen
,
134 struct pipe_fence_handle
**dst
,
135 struct pipe_fence_handle
*src
)
137 if (pipe_reference(*dst
? &(*dst
)->ref
: NULL
,
138 src
? &src
->ref
: NULL
))
139 iris_fence_destroy(p_screen
, *dst
);
145 iris_wait_syncobj(struct pipe_screen
*p_screen
,
146 struct iris_syncobj
*syncobj
,
147 int64_t timeout_nsec
)
152 struct iris_screen
*screen
= (struct iris_screen
*)p_screen
;
153 struct drm_syncobj_wait args
= {
154 .handles
= (uintptr_t)&syncobj
->handle
,
156 .timeout_nsec
= timeout_nsec
,
158 return gen_ioctl(screen
->fd
, DRM_IOCTL_SYNCOBJ_WAIT
, &args
);
/* ANSI escape sequences for the INTEL_DEBUG=submit frame banner.
 * NOTE(review): CSI restored — BLUE_HEADER/NORMAL expand it, and it was
 * lost in extraction.  "\e" is the GNU spelling of ESC.
 */
#define CSI "\e["
#define BLUE_HEADER CSI "0;97;44m"
#define NORMAL CSI "0m"
166 iris_fence_flush(struct pipe_context
*ctx
,
167 struct pipe_fence_handle
**out_fence
,
170 struct iris_screen
*screen
= (void *) ctx
->screen
;
171 struct iris_context
*ice
= (struct iris_context
*)ctx
;
173 if (flags
& PIPE_FLUSH_END_OF_FRAME
) {
176 if (INTEL_DEBUG
& DEBUG_SUBMIT
) {
177 fprintf(stderr
, "%s ::: FRAME %-10u (ctx %p)%-35c%s\n",
178 (INTEL_DEBUG
& DEBUG_COLOR
) ? BLUE_HEADER
: "",
179 ice
->frame
, ctx
, ' ',
180 (INTEL_DEBUG
& DEBUG_COLOR
) ? NORMAL
: "");
184 /* XXX PIPE_FLUSH_DEFERRED */
185 for (unsigned i
= 0; i
< IRIS_BATCH_COUNT
; i
++)
186 iris_batch_flush(&ice
->batches
[i
]);
191 struct pipe_fence_handle
*fence
= calloc(1, sizeof(*fence
));
195 pipe_reference_init(&fence
->ref
, 1);
197 for (unsigned b
= 0; b
< IRIS_BATCH_COUNT
; b
++) {
198 struct iris_batch
*batch
= &ice
->batches
[b
];
200 if (iris_seqno_signaled(batch
->last_seqno
))
203 iris_seqno_reference(screen
, &fence
->seqno
[b
], batch
->last_seqno
);
206 iris_fence_reference(ctx
->screen
, out_fence
, NULL
);
211 iris_fence_await(struct pipe_context
*ctx
,
212 struct pipe_fence_handle
*fence
)
214 struct iris_context
*ice
= (struct iris_context
*)ctx
;
216 /* Flush any current work in our context as it doesn't need to wait
217 * for this fence. Any future work in our context must wait.
219 for (unsigned b
= 0; b
< IRIS_BATCH_COUNT
; b
++) {
220 struct iris_batch
*batch
= &ice
->batches
[b
];
222 for (unsigned i
= 0; i
< ARRAY_SIZE(fence
->seqno
); i
++) {
223 struct iris_seqno
*seqno
= fence
->seqno
[i
];
225 if (iris_seqno_signaled(seqno
))
228 iris_batch_flush(batch
);
229 iris_batch_add_syncobj(batch
, seqno
->syncobj
, I915_EXEC_FENCE_WAIT
);
234 #define NSEC_PER_SEC (1000 * USEC_PER_SEC)
235 #define USEC_PER_SEC (1000 * MSEC_PER_SEC)
236 #define MSEC_PER_SEC (1000)
241 struct timespec current
;
242 clock_gettime(CLOCK_MONOTONIC
, ¤t
);
243 return (uint64_t)current
.tv_sec
* NSEC_PER_SEC
+ current
.tv_nsec
;
247 rel2abs(uint64_t timeout
)
252 uint64_t current_time
= gettime_ns();
253 uint64_t max_timeout
= (uint64_t) INT64_MAX
- current_time
;
255 timeout
= MIN2(max_timeout
, timeout
);
257 return current_time
+ timeout
;
261 iris_fence_finish(struct pipe_screen
*p_screen
,
262 struct pipe_context
*ctx
,
263 struct pipe_fence_handle
*fence
,
266 struct iris_screen
*screen
= (struct iris_screen
*)p_screen
;
268 unsigned int handle_count
= 0;
269 uint32_t handles
[ARRAY_SIZE(fence
->seqno
)];
270 for (unsigned i
= 0; i
< ARRAY_SIZE(fence
->seqno
); i
++) {
271 struct iris_seqno
*seqno
= fence
->seqno
[i
];
273 if (iris_seqno_signaled(seqno
))
276 handles
[handle_count
++] = seqno
->syncobj
->handle
;
279 if (handle_count
== 0)
282 struct drm_syncobj_wait args
= {
283 .handles
= (uintptr_t)handles
,
284 .count_handles
= handle_count
,
285 .timeout_nsec
= rel2abs(timeout
),
286 .flags
= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL
288 return gen_ioctl(screen
->fd
, DRM_IOCTL_SYNCOBJ_WAIT
, &args
) == 0;
292 sync_merge_fd(int sync_fd
, int new_fd
)
300 struct sync_merge_data args
= {
301 .name
= "iris fence",
306 gen_ioctl(sync_fd
, SYNC_IOC_MERGE
, &args
);
314 iris_fence_get_fd(struct pipe_screen
*p_screen
,
315 struct pipe_fence_handle
*fence
)
317 struct iris_screen
*screen
= (struct iris_screen
*)p_screen
;
320 for (unsigned i
= 0; i
< ARRAY_SIZE(fence
->seqno
); i
++) {
321 struct iris_seqno
*seqno
= fence
->seqno
[i
];
323 if (iris_seqno_signaled(seqno
))
326 struct drm_syncobj_handle args
= {
327 .handle
= seqno
->syncobj
->handle
,
328 .flags
= DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE
,
332 gen_ioctl(screen
->fd
, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD
, &args
);
333 fd
= sync_merge_fd(fd
, args
.fd
);
337 /* Our fence has no syncobj's recorded. This means that all of the
338 * batches had already completed, their syncobj's had been signalled,
339 * and so we didn't bother to record them. But we're being asked to
340 * export such a fence. So export a dummy already-signalled syncobj.
342 struct drm_syncobj_handle args
= {
343 .flags
= DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE
, .fd
= -1,
346 args
.handle
= gem_syncobj_create(screen
->fd
, DRM_SYNCOBJ_CREATE_SIGNALED
);
347 gen_ioctl(screen
->fd
, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD
, &args
);
348 gem_syncobj_destroy(screen
->fd
, args
.handle
);
356 iris_fence_create_fd(struct pipe_context
*ctx
,
357 struct pipe_fence_handle
**out
,
359 enum pipe_fd_type type
)
361 assert(type
== PIPE_FD_TYPE_NATIVE_SYNC
);
363 struct iris_screen
*screen
= (struct iris_screen
*)ctx
->screen
;
364 struct drm_syncobj_handle args
= {
365 .handle
= gem_syncobj_create(screen
->fd
, DRM_SYNCOBJ_CREATE_SIGNALED
),
366 .flags
= DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE
,
369 if (gen_ioctl(screen
->fd
, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE
, &args
) == -1) {
370 fprintf(stderr
, "DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE failed: %s\n",
372 gem_syncobj_destroy(screen
->fd
, args
.handle
);
377 struct iris_syncobj
*syncobj
= malloc(sizeof(*syncobj
));
382 syncobj
->handle
= args
.handle
;
383 pipe_reference_init(&syncobj
->ref
, 1);
385 struct iris_seqno
*seqno
= malloc(sizeof(*seqno
));
392 static const uint32_t zero
= 0;
394 /* Fences work in terms of iris_seqno, but we don't actually have a
395 * seqno for an imported fence. So, create a fake one which always
396 * returns as 'not signaled' so we fall back to using the sync object.
398 seqno
->seqno
= UINT32_MAX
;
400 seqno
->syncobj
= syncobj
;
401 seqno
->flags
= IRIS_SEQNO_END
;
402 pipe_reference_init(&seqno
->reference
, 1);
404 struct pipe_fence_handle
*fence
= calloc(1, sizeof(*fence
));
411 pipe_reference_init(&fence
->ref
, 1);
412 fence
->seqno
[0] = seqno
;
418 iris_init_screen_fence_functions(struct pipe_screen
*screen
)
420 screen
->fence_reference
= iris_fence_reference
;
421 screen
->fence_finish
= iris_fence_finish
;
422 screen
->fence_get_fd
= iris_fence_get_fd
;
426 iris_init_context_fence_functions(struct pipe_context
*ctx
)
428 ctx
->flush
= iris_fence_flush
;
429 ctx
->create_fence_fd
= iris_fence_create_fd
;
430 ctx
->fence_server_sync
= iris_fence_await
;