2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
30 * \brief Support for GL_ARB_sync and EGL_KHR_fence_sync.
32 * GL_ARB_sync is implemented by flushing the current batchbuffer and keeping a
33 * reference on it. We can then check for completion or wait for completion
34 * using the normal buffer object mechanisms. This does mean that if an
35 * application is using many sync objects, it will emit small batchbuffers
36 * which may end up being a significant overhead. In other tests of removing
37 * gratuitous batchbuffer syncs in Mesa, it hasn't appeared to be a significant
38 * performance bottleneck, though.
41 #include <libsync.h> /* Requires Android or libdrm-2.4.72 */
43 #include "main/imports.h"
45 #include "brw_context.h"
46 #include "intel_batchbuffer.h"
49 struct brw_context
*brw
;
52 /** The fence waits for completion of brw_fence::batch_bo. */
53 BRW_FENCE_TYPE_BO_WAIT
,
55 /** The fence waits for brw_fence::sync_fd to signal. */
56 BRW_FENCE_TYPE_SYNC_FD
,
60 drm_bacon_bo
*batch_bo
;
62 /* This struct owns the fd. */
71 struct gl_sync_object gl
;
72 struct brw_fence fence
;
76 brw_fence_init(struct brw_context
*brw
, struct brw_fence
*fence
,
77 enum brw_fence_type type
)
81 mtx_init(&fence
->mutex
, mtx_plain
);
84 case BRW_FENCE_TYPE_BO_WAIT
:
85 fence
->batch_bo
= NULL
;
87 case BRW_FENCE_TYPE_SYNC_FD
:
94 brw_fence_finish(struct brw_fence
*fence
)
96 switch (fence
->type
) {
97 case BRW_FENCE_TYPE_BO_WAIT
:
99 drm_bacon_bo_unreference(fence
->batch_bo
);
101 case BRW_FENCE_TYPE_SYNC_FD
:
102 if (fence
->sync_fd
!= -1)
103 close(fence
->sync_fd
);
107 mtx_destroy(&fence
->mutex
);
110 static bool MUST_CHECK
111 brw_fence_insert_locked(struct brw_context
*brw
, struct brw_fence
*fence
)
113 brw_emit_mi_flush(brw
);
115 switch (fence
->type
) {
116 case BRW_FENCE_TYPE_BO_WAIT
:
117 assert(!fence
->batch_bo
);
118 assert(!fence
->signalled
);
120 fence
->batch_bo
= brw
->batch
.bo
;
121 drm_bacon_bo_reference(fence
->batch_bo
);
123 if (intel_batchbuffer_flush(brw
) < 0) {
124 drm_bacon_bo_unreference(fence
->batch_bo
);
125 fence
->batch_bo
= NULL
;
129 case BRW_FENCE_TYPE_SYNC_FD
:
130 assert(!fence
->signalled
);
132 if (fence
->sync_fd
== -1) {
133 /* Create an out-fence that signals after all pending commands
136 if (intel_batchbuffer_flush_fence(brw
, -1, &fence
->sync_fd
) < 0)
138 assert(fence
->sync_fd
!= -1);
140 /* Wait on the in-fence before executing any subsequently submitted
143 if (intel_batchbuffer_flush(brw
) < 0)
146 /* Emit a dummy batch just for the fence. */
147 brw_emit_mi_flush(brw
);
148 if (intel_batchbuffer_flush_fence(brw
, fence
->sync_fd
, NULL
) < 0)
157 static bool MUST_CHECK
158 brw_fence_insert(struct brw_context
*brw
, struct brw_fence
*fence
)
162 mtx_lock(&fence
->mutex
);
163 ret
= brw_fence_insert_locked(brw
, fence
);
164 mtx_unlock(&fence
->mutex
);
170 brw_fence_has_completed_locked(struct brw_fence
*fence
)
172 if (fence
->signalled
)
175 switch (fence
->type
) {
176 case BRW_FENCE_TYPE_BO_WAIT
:
177 if (!fence
->batch_bo
) {
178 /* There may be no batch if intel_batchbuffer_flush() failed. */
182 if (drm_bacon_bo_busy(fence
->batch_bo
))
185 drm_bacon_bo_unreference(fence
->batch_bo
);
186 fence
->batch_bo
= NULL
;
187 fence
->signalled
= true;
191 case BRW_FENCE_TYPE_SYNC_FD
:
192 assert(fence
->sync_fd
!= -1);
194 if (sync_wait(fence
->sync_fd
, 0) == -1)
197 fence
->signalled
= true;
206 brw_fence_has_completed(struct brw_fence
*fence
)
210 mtx_lock(&fence
->mutex
);
211 ret
= brw_fence_has_completed_locked(fence
);
212 mtx_unlock(&fence
->mutex
);
218 brw_fence_client_wait_locked(struct brw_context
*brw
, struct brw_fence
*fence
,
223 if (fence
->signalled
)
226 switch (fence
->type
) {
227 case BRW_FENCE_TYPE_BO_WAIT
:
228 if (!fence
->batch_bo
) {
229 /* There may be no batch if intel_batchbuffer_flush() failed. */
233 /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and returns
234 * immediately for timeouts <= 0. The best we can do is to clamp the
235 * timeout to INT64_MAX. This limits the maximum timeout from 584 years to
236 * 292 years - likely not a big deal.
238 if (timeout
> INT64_MAX
)
241 if (drm_bacon_gem_bo_wait(fence
->batch_bo
, timeout
) != 0)
244 fence
->signalled
= true;
245 drm_bacon_bo_unreference(fence
->batch_bo
);
246 fence
->batch_bo
= NULL
;
249 case BRW_FENCE_TYPE_SYNC_FD
:
250 if (fence
->sync_fd
== -1)
253 if (timeout
> INT32_MAX
)
256 timeout_i32
= timeout
;
258 if (sync_wait(fence
->sync_fd
, timeout_i32
) == -1)
261 fence
->signalled
= true;
265 assert(!"bad enum brw_fence_type");
270 * Return true if the function successfully signals or has already signalled.
271 * (This matches the behavior expected from __DRI2fence::client_wait_sync).
274 brw_fence_client_wait(struct brw_context
*brw
, struct brw_fence
*fence
,
279 mtx_lock(&fence
->mutex
);
280 ret
= brw_fence_client_wait_locked(brw
, fence
, timeout
);
281 mtx_unlock(&fence
->mutex
);
287 brw_fence_server_wait(struct brw_context
*brw
, struct brw_fence
*fence
)
289 switch (fence
->type
) {
290 case BRW_FENCE_TYPE_BO_WAIT
:
291 /* We have nothing to do for WaitSync. Our GL command stream is sequential,
292 * so given that the sync object has already flushed the batchbuffer, any
293 * batchbuffers coming after this waitsync will naturally not occur until
294 * the previous one is done.
297 case BRW_FENCE_TYPE_SYNC_FD
:
298 assert(fence
->sync_fd
!= -1);
300 /* The user wants explicit synchronization, so give them what they want. */
301 if (!brw_fence_insert(brw
, fence
)) {
302 /* FIXME: There exists no way yet to report an error here. If an error
303 * occurs, continue silently and hope for the best.
310 static struct gl_sync_object
*
311 brw_gl_new_sync(struct gl_context
*ctx
, GLuint id
)
313 struct brw_gl_sync
*sync
;
315 sync
= calloc(1, sizeof(*sync
));
323 brw_gl_delete_sync(struct gl_context
*ctx
, struct gl_sync_object
*_sync
)
325 struct brw_gl_sync
*sync
= (struct brw_gl_sync
*) _sync
;
327 brw_fence_finish(&sync
->fence
);
332 brw_gl_fence_sync(struct gl_context
*ctx
, struct gl_sync_object
*_sync
,
333 GLenum condition
, GLbitfield flags
)
335 struct brw_context
*brw
= brw_context(ctx
);
336 struct brw_gl_sync
*sync
= (struct brw_gl_sync
*) _sync
;
338 brw_fence_init(brw
, &sync
->fence
, BRW_FENCE_TYPE_BO_WAIT
);
340 if (!brw_fence_insert_locked(brw
, &sync
->fence
)) {
341 /* FIXME: There exists no way to report a GL error here. If an error
342 * occurs, continue silently and hope for the best.
348 brw_gl_client_wait_sync(struct gl_context
*ctx
, struct gl_sync_object
*_sync
,
349 GLbitfield flags
, GLuint64 timeout
)
351 struct brw_context
*brw
= brw_context(ctx
);
352 struct brw_gl_sync
*sync
= (struct brw_gl_sync
*) _sync
;
354 if (brw_fence_client_wait(brw
, &sync
->fence
, timeout
))
355 sync
->gl
.StatusFlag
= 1;
359 brw_gl_server_wait_sync(struct gl_context
*ctx
, struct gl_sync_object
*_sync
,
360 GLbitfield flags
, GLuint64 timeout
)
362 struct brw_context
*brw
= brw_context(ctx
);
363 struct brw_gl_sync
*sync
= (struct brw_gl_sync
*) _sync
;
365 brw_fence_server_wait(brw
, &sync
->fence
);
369 brw_gl_check_sync(struct gl_context
*ctx
, struct gl_sync_object
*_sync
)
371 struct brw_gl_sync
*sync
= (struct brw_gl_sync
*) _sync
;
373 if (brw_fence_has_completed(&sync
->fence
))
374 sync
->gl
.StatusFlag
= 1;
378 brw_init_syncobj_functions(struct dd_function_table
*functions
)
380 functions
->NewSyncObject
= brw_gl_new_sync
;
381 functions
->DeleteSyncObject
= brw_gl_delete_sync
;
382 functions
->FenceSync
= brw_gl_fence_sync
;
383 functions
->CheckSync
= brw_gl_check_sync
;
384 functions
->ClientWaitSync
= brw_gl_client_wait_sync
;
385 functions
->ServerWaitSync
= brw_gl_server_wait_sync
;
389 brw_dri_create_fence(__DRIcontext
*ctx
)
391 struct brw_context
*brw
= ctx
->driverPrivate
;
392 struct brw_fence
*fence
;
394 fence
= calloc(1, sizeof(*fence
));
398 brw_fence_init(brw
, fence
, BRW_FENCE_TYPE_BO_WAIT
);
400 if (!brw_fence_insert_locked(brw
, fence
)) {
401 brw_fence_finish(fence
);
410 brw_dri_destroy_fence(__DRIscreen
*dri_screen
, void *_fence
)
412 struct brw_fence
*fence
= _fence
;
414 brw_fence_finish(fence
);
419 brw_dri_client_wait_sync(__DRIcontext
*ctx
, void *_fence
, unsigned flags
,
422 struct brw_fence
*fence
= _fence
;
424 return brw_fence_client_wait(fence
->brw
, fence
, timeout
);
428 brw_dri_server_wait_sync(__DRIcontext
*ctx
, void *_fence
, unsigned flags
)
430 struct brw_fence
*fence
= _fence
;
432 /* We might be called here with a NULL fence as a result of WaitSyncKHR
433 * on a EGL_KHR_reusable_sync fence. Nothing to do here in such case.
438 brw_fence_server_wait(fence
->brw
, fence
);
442 brw_dri_get_capabilities(__DRIscreen
*dri_screen
)
444 struct intel_screen
*screen
= dri_screen
->driverPrivate
;
447 if (screen
->has_exec_fence
)
448 caps
|= __DRI_FENCE_CAP_NATIVE_FD
;
454 brw_dri_create_fence_fd(__DRIcontext
*dri_ctx
, int fd
)
456 struct brw_context
*brw
= dri_ctx
->driverPrivate
;
457 struct brw_fence
*fence
;
459 assert(brw
->screen
->has_exec_fence
);
461 fence
= calloc(1, sizeof(*fence
));
465 brw_fence_init(brw
, fence
, BRW_FENCE_TYPE_SYNC_FD
);
468 /* Create an out-fence fd */
469 if (!brw_fence_insert_locked(brw
, fence
))
472 /* Import the sync fd as an in-fence. */
476 assert(fence
->sync_fd
!= -1);
481 brw_fence_finish(fence
);
487 brw_dri_get_fence_fd_locked(struct brw_fence
*fence
)
489 assert(fence
->type
== BRW_FENCE_TYPE_SYNC_FD
);
490 return dup(fence
->sync_fd
);
494 brw_dri_get_fence_fd(__DRIscreen
*dri_screen
, void *_fence
)
496 struct brw_fence
*fence
= _fence
;
499 mtx_lock(&fence
->mutex
);
500 fd
= brw_dri_get_fence_fd_locked(fence
);
501 mtx_unlock(&fence
->mutex
);
506 const __DRI2fenceExtension intelFenceExtension
= {
507 .base
= { __DRI2_FENCE
, 2 },
509 .create_fence
= brw_dri_create_fence
,
510 .destroy_fence
= brw_dri_destroy_fence
,
511 .client_wait_sync
= brw_dri_client_wait_sync
,
512 .server_wait_sync
= brw_dri_server_wait_sync
,
513 .get_fence_from_cl_event
= NULL
,
514 .get_capabilities
= brw_dri_get_capabilities
,
515 .create_fence_fd
= brw_dri_create_fence_fd
,
516 .get_fence_fd
= brw_dri_get_fence_fd
,