/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */
/**
 * \file
 * \brief Support for GL_ARB_sync and EGL_KHR_fence_sync.
 *
 * GL_ARB_sync is implemented by flushing the current batchbuffer and keeping a
 * reference on it. We can then check for completion or wait for completion
 * using the normal buffer object mechanisms. This does mean that if an
 * application is using many sync objects, it will emit small batchbuffers
 * which may end up being a significant overhead. In other tests of removing
 * gratuitous batchbuffer syncs in Mesa, it hasn't appeared to be a significant
 * performance bottleneck, though.
 */
#include "main/imports.h"

#include "brw_context.h"
#include "intel_batchbuffer.h"
47 struct brw_context
*brw
;
48 /** The fence waits for completion of this batch. */
49 drm_intel_bo
*batch_bo
;
56 struct gl_sync_object gl
;
57 struct brw_fence fence
;
61 brw_fence_init(struct brw_context
*brw
, struct brw_fence
*fence
)
64 fence
->batch_bo
= NULL
;
65 mtx_init(&fence
->mutex
, mtx_plain
);
69 brw_fence_finish(struct brw_fence
*fence
)
72 drm_intel_bo_unreference(fence
->batch_bo
);
74 mtx_destroy(&fence
->mutex
);
78 brw_fence_insert(struct brw_context
*brw
, struct brw_fence
*fence
)
80 assert(!fence
->batch_bo
);
81 assert(!fence
->signalled
);
83 brw_emit_mi_flush(brw
);
84 fence
->batch_bo
= brw
->batch
.bo
;
85 drm_intel_bo_reference(fence
->batch_bo
);
86 intel_batchbuffer_flush(brw
);
90 brw_fence_has_completed_locked(struct brw_fence
*fence
)
95 if (fence
->batch_bo
&& !drm_intel_bo_busy(fence
->batch_bo
)) {
96 drm_intel_bo_unreference(fence
->batch_bo
);
97 fence
->batch_bo
= NULL
;
98 fence
->signalled
= true;
106 brw_fence_has_completed(struct brw_fence
*fence
)
110 mtx_lock(&fence
->mutex
);
111 ret
= brw_fence_has_completed_locked(fence
);
112 mtx_unlock(&fence
->mutex
);
118 brw_fence_client_wait_locked(struct brw_context
*brw
, struct brw_fence
*fence
,
121 if (fence
->signalled
)
124 assert(fence
->batch_bo
);
126 /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and returns
127 * immediately for timeouts <= 0. The best we can do is to clamp the
128 * timeout to INT64_MAX. This limits the maximum timeout from 584 years to
129 * 292 years - likely not a big deal.
131 if (timeout
> INT64_MAX
)
134 if (drm_intel_gem_bo_wait(fence
->batch_bo
, timeout
) != 0)
137 fence
->signalled
= true;
138 drm_intel_bo_unreference(fence
->batch_bo
);
139 fence
->batch_bo
= NULL
;
145 * Return true if the function successfully signals or has already signalled.
146 * (This matches the behavior expected from __DRI2fence::client_wait_sync).
149 brw_fence_client_wait(struct brw_context
*brw
, struct brw_fence
*fence
,
154 mtx_lock(&fence
->mutex
);
155 ret
= brw_fence_client_wait_locked(brw
, fence
, timeout
);
156 mtx_unlock(&fence
->mutex
);
/**
 * Server-side wait: intentionally a no-op.
 *
 * We have nothing to do for WaitSync. Our GL command stream is sequential,
 * so given that the sync object has already flushed the batchbuffer, any
 * batchbuffers coming after this waitsync will naturally not occur until
 * the previous one is done.
 */
static void
brw_fence_server_wait(struct brw_context *brw, struct brw_fence *fence)
{
}
171 static struct gl_sync_object
*
172 brw_gl_new_sync(struct gl_context
*ctx
, GLuint id
)
174 struct brw_gl_sync
*sync
;
176 sync
= calloc(1, sizeof(*sync
));
184 brw_gl_delete_sync(struct gl_context
*ctx
, struct gl_sync_object
*_sync
)
186 struct brw_gl_sync
*sync
= (struct brw_gl_sync
*) _sync
;
188 brw_fence_finish(&sync
->fence
);
193 brw_gl_fence_sync(struct gl_context
*ctx
, struct gl_sync_object
*_sync
,
194 GLenum condition
, GLbitfield flags
)
196 struct brw_context
*brw
= brw_context(ctx
);
197 struct brw_gl_sync
*sync
= (struct brw_gl_sync
*) _sync
;
199 brw_fence_init(brw
, &sync
->fence
);
200 brw_fence_insert(brw
, &sync
->fence
);
204 brw_gl_client_wait_sync(struct gl_context
*ctx
, struct gl_sync_object
*_sync
,
205 GLbitfield flags
, GLuint64 timeout
)
207 struct brw_context
*brw
= brw_context(ctx
);
208 struct brw_gl_sync
*sync
= (struct brw_gl_sync
*) _sync
;
210 if (brw_fence_client_wait(brw
, &sync
->fence
, timeout
))
211 sync
->gl
.StatusFlag
= 1;
215 brw_gl_server_wait_sync(struct gl_context
*ctx
, struct gl_sync_object
*_sync
,
216 GLbitfield flags
, GLuint64 timeout
)
218 struct brw_context
*brw
= brw_context(ctx
);
219 struct brw_gl_sync
*sync
= (struct brw_gl_sync
*) _sync
;
221 brw_fence_server_wait(brw
, &sync
->fence
);
225 brw_gl_check_sync(struct gl_context
*ctx
, struct gl_sync_object
*_sync
)
227 struct brw_gl_sync
*sync
= (struct brw_gl_sync
*) _sync
;
229 if (brw_fence_has_completed(&sync
->fence
))
230 sync
->gl
.StatusFlag
= 1;
234 brw_init_syncobj_functions(struct dd_function_table
*functions
)
236 functions
->NewSyncObject
= brw_gl_new_sync
;
237 functions
->DeleteSyncObject
= brw_gl_delete_sync
;
238 functions
->FenceSync
= brw_gl_fence_sync
;
239 functions
->CheckSync
= brw_gl_check_sync
;
240 functions
->ClientWaitSync
= brw_gl_client_wait_sync
;
241 functions
->ServerWaitSync
= brw_gl_server_wait_sync
;
245 brw_dri_create_fence(__DRIcontext
*ctx
)
247 struct brw_context
*brw
= ctx
->driverPrivate
;
248 struct brw_fence
*fence
;
250 fence
= calloc(1, sizeof(*fence
));
254 brw_fence_init(brw
, fence
);
255 brw_fence_insert(brw
, fence
);
261 brw_dri_destroy_fence(__DRIscreen
*dri_screen
, void *driver_fence
)
263 struct brw_fence
*fence
= driver_fence
;
265 brw_fence_finish(fence
);
270 brw_dri_client_wait_sync(__DRIcontext
*ctx
, void *driver_fence
, unsigned flags
,
273 struct brw_fence
*fence
= driver_fence
;
275 return brw_fence_client_wait(fence
->brw
, fence
, timeout
);
279 brw_dri_server_wait_sync(__DRIcontext
*ctx
, void *driver_fence
, unsigned flags
)
281 struct brw_fence
*fence
= driver_fence
;
283 /* We might be called here with a NULL fence as a result of WaitSyncKHR
284 * on a EGL_KHR_reusable_sync fence. Nothing to do here in such case.
289 brw_fence_server_wait(fence
->brw
, fence
);
292 const __DRI2fenceExtension intelFenceExtension
= {
293 .base
= { __DRI2_FENCE
, 1 },
295 .create_fence
= brw_dri_create_fence
,
296 .destroy_fence
= brw_dri_destroy_fence
,
297 .client_wait_sync
= brw_dri_client_wait_sync
,
298 .server_wait_sync
= brw_dri_server_wait_sync
,
299 .get_fence_from_cl_event
= NULL
,