/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

/**
 * \file
 * \brief Support for GL_ARB_sync and EGL_KHR_fence_sync.
 *
 * GL_ARB_sync is implemented by flushing the current batchbuffer and keeping
 * a reference on it. We can then check for completion or wait for completion
 * using the normal buffer object mechanisms. This does mean that an
 * application using many sync objects will emit many small batchbuffers,
 * which can add significant overhead. In other experiments with removing
 * gratuitous batchbuffer syncs in Mesa, though, this has not appeared to be
 * a significant performance bottleneck.
 */
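/*
 * For reference, a minimal client-side sketch of the GL_ARB_sync API that
 * these hooks implement (illustrative only, not code from this driver):
 *
 *    GLsync sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
 *    ... issue more GL commands ...
 *    GLenum status = glClientWaitSync(sync, GL_SYNC_FLUSH_COMMANDS_BIT,
 *                                     1000000);   // timeout in nanoseconds
 *    if (status == GL_ALREADY_SIGNALED || status == GL_CONDITION_SATISFIED)
 *       ... the fence's commands have completed ...
 *    glDeleteSync(sync);
 */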

#include <libsync.h> /* Requires Android or libdrm-2.4.72 */

#include "main/imports.h"

#include "brw_context.h"
#include "intel_batchbuffer.h"

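/* A driver fence is a tagged union: it tracks completion either of the
 * batchbuffer BO that was current when the fence was inserted, or of a sync
 * file descriptor. The mutex guards the type-specific state and the
 * signalled flag, since DRI2 fences may be touched from multiple threads.
 */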
struct brw_fence {
   struct brw_context *brw;

   enum brw_fence_type {
      /** The fence waits for completion of brw_fence::batch_bo. */
      BRW_FENCE_TYPE_BO_WAIT,

      /** The fence waits for brw_fence::sync_fd to signal. */
      BRW_FENCE_TYPE_SYNC_FD,
   } type;

   union {
      struct brw_bo *batch_bo;

      /* This struct owns the fd. */
      int sync_fd;
   };

   mtx_t mutex;
   bool signalled;
};

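/* The embedded gl_sync_object must be the first member so that the
 * gl_sync_object pointers handed to us by core Mesa can be downcast with a
 * simple pointer cast, as the callbacks below do.
 */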
struct brw_gl_sync {
   struct gl_sync_object gl;
   struct brw_fence fence;
};

static void
brw_fence_init(struct brw_context *brw, struct brw_fence *fence,
               enum brw_fence_type type)
{
   fence->brw = brw;
   fence->type = type;
   mtx_init(&fence->mutex, mtx_plain);

   switch (type) {
   case BRW_FENCE_TYPE_BO_WAIT:
      fence->batch_bo = NULL;
      break;
   case BRW_FENCE_TYPE_SYNC_FD:
      fence->sync_fd = -1;
      break;
   }
}

static void
brw_fence_finish(struct brw_fence *fence)
{
   switch (fence->type) {
   case BRW_FENCE_TYPE_BO_WAIT:
      if (fence->batch_bo)
         brw_bo_unreference(fence->batch_bo);
      break;
   case BRW_FENCE_TYPE_SYNC_FD:
      if (fence->sync_fd != -1)
         close(fence->sync_fd);
      break;
   }

   mtx_destroy(&fence->mutex);
}

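/* Flush the batchbuffer and arm the fence. Returns false if flushing the
 * batchbuffer fails. Callers must hold fence->mutex, unless the fence is
 * freshly initialized and not yet visible to any other thread.
 */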
static bool MUST_CHECK
brw_fence_insert_locked(struct brw_context *brw, struct brw_fence *fence)
{
   brw_emit_mi_flush(brw);

   switch (fence->type) {
   case BRW_FENCE_TYPE_BO_WAIT:
      assert(!fence->batch_bo);
      assert(!fence->signalled);

      fence->batch_bo = brw->batch.bo;
      brw_bo_reference(fence->batch_bo);

      if (intel_batchbuffer_flush(brw) < 0) {
         brw_bo_unreference(fence->batch_bo);
         fence->batch_bo = NULL;
         return false;
      }
      break;
   case BRW_FENCE_TYPE_SYNC_FD:
      assert(!fence->signalled);

      if (fence->sync_fd == -1) {
         /* Create an out-fence that signals after all pending commands
          * complete.
          */
         if (intel_batchbuffer_flush_fence(brw, -1, &fence->sync_fd) < 0)
            return false;
         assert(fence->sync_fd != -1);
      } else {
         /* Wait on the in-fence before executing any subsequently submitted
          * commands.
          */
         if (intel_batchbuffer_flush(brw) < 0)
            return false;

         /* Emit a dummy batch just for the fence. */
         brw_emit_mi_flush(brw);
         if (intel_batchbuffer_flush_fence(brw, fence->sync_fd, NULL) < 0)
            return false;
      }
      break;
   }

   return true;
}

static bool MUST_CHECK
brw_fence_insert(struct brw_context *brw, struct brw_fence *fence)
{
   bool ret;

   mtx_lock(&fence->mutex);
   ret = brw_fence_insert_locked(brw, fence);
   mtx_unlock(&fence->mutex);

   return ret;
}

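/* Non-blocking completion check. For sync-fd fences, sync_wait() with a
 * timeout of 0 acts as a poll on the fd.
 */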
static bool
brw_fence_has_completed_locked(struct brw_fence *fence)
{
   if (fence->signalled)
      return true;

   switch (fence->type) {
   case BRW_FENCE_TYPE_BO_WAIT:
      if (!fence->batch_bo) {
         /* There may be no batch if intel_batchbuffer_flush() failed. */
         return false;
      }

      if (brw_bo_busy(fence->batch_bo))
         return false;

      brw_bo_unreference(fence->batch_bo);
      fence->batch_bo = NULL;
      fence->signalled = true;

      return true;

   case BRW_FENCE_TYPE_SYNC_FD:
      assert(fence->sync_fd != -1);

      if (sync_wait(fence->sync_fd, 0) == -1)
         return false;

      fence->signalled = true;

      return true;
   }

   return false;
}

static bool
brw_fence_has_completed(struct brw_fence *fence)
{
   bool ret;

   mtx_lock(&fence->mutex);
   ret = brw_fence_has_completed_locked(fence);
   mtx_unlock(&fence->mutex);

   return ret;
}

static bool
brw_fence_client_wait_locked(struct brw_context *brw, struct brw_fence *fence,
                             uint64_t timeout)
{
   int32_t timeout_i32;

   if (fence->signalled)
      return true;

   switch (fence->type) {
   case BRW_FENCE_TYPE_BO_WAIT:
      if (!fence->batch_bo) {
         /* There may be no batch if intel_batchbuffer_flush() failed. */
         return false;
      }

      /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64-bit timeout and returns
       * immediately for timeouts <= 0. The best we can do is to clamp the
       * timeout to INT64_MAX. This reduces the maximum timeout from 584
       * years to 292 years, likely not a big deal.
       */
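      /* (The arithmetic: 2^64 ns / (1e9 ns/s * ~3.156e7 s/yr) is about 584
       * years; clamping to INT64_MAX = 2^63 - 1 ns halves that to about 292
       * years.)
       */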
      if (timeout > INT64_MAX)
         timeout = INT64_MAX;

      if (brw_bo_wait(fence->batch_bo, timeout) != 0)
         return false;

      fence->signalled = true;
      brw_bo_unreference(fence->batch_bo);
      fence->batch_bo = NULL;

      return true;
   case BRW_FENCE_TYPE_SYNC_FD:
      if (fence->sync_fd == -1)
         return false;

      if (timeout > INT32_MAX)
         timeout_i32 = -1;
      else
         timeout_i32 = timeout;

      if (sync_wait(fence->sync_fd, timeout_i32) == -1)
         return false;

      fence->signalled = true;
      return true;
   }

   assert(!"bad enum brw_fence_type");
   return false;
}

/**
 * Return true if the fence has already signalled or signals within the
 * timeout. (This matches the behavior expected from
 * __DRI2fence::client_wait_sync.)
 */
static bool
brw_fence_client_wait(struct brw_context *brw, struct brw_fence *fence,
                      uint64_t timeout)
{
   bool ret;

   mtx_lock(&fence->mutex);
   ret = brw_fence_client_wait_locked(brw, fence, timeout);
   mtx_unlock(&fence->mutex);

   return ret;
}

static void
brw_fence_server_wait(struct brw_context *brw, struct brw_fence *fence)
{
   switch (fence->type) {
   case BRW_FENCE_TYPE_BO_WAIT:
      /* We have nothing to do for WaitSync. Our GL command stream is
       * sequential, so given that the sync object has already flushed the
       * batchbuffer, any batchbuffers coming after this WaitSync will
       * naturally not occur until the previous one is done.
       */
      break;
   case BRW_FENCE_TYPE_SYNC_FD:
      assert(fence->sync_fd != -1);

      /* The user wants explicit synchronization, so give them what they
       * want.
       */
      if (!brw_fence_insert(brw, fence)) {
         /* FIXME: There exists no way yet to report an error here. If an
          * error occurs, continue silently and hope for the best.
          */
      }
      break;
   }
}

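/* Core Mesa calls this to allocate the sync object; the fence itself is
 * initialized and inserted later, in brw_gl_fence_sync().
 */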
static struct gl_sync_object *
brw_gl_new_sync(struct gl_context *ctx, GLuint id)
{
   struct brw_gl_sync *sync;

   sync = calloc(1, sizeof(*sync));
   if (!sync)
      return NULL;

   return &sync->gl;
}

static void
brw_gl_delete_sync(struct gl_context *ctx, struct gl_sync_object *_sync)
{
   struct brw_gl_sync *sync = (struct brw_gl_sync *) _sync;

   brw_fence_finish(&sync->fence);
   free(sync);
}

static void
brw_gl_fence_sync(struct gl_context *ctx, struct gl_sync_object *_sync,
                  GLenum condition, GLbitfield flags)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_gl_sync *sync = (struct brw_gl_sync *) _sync;

   brw_fence_init(brw, &sync->fence, BRW_FENCE_TYPE_BO_WAIT);

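   /* The fence was just initialized and is not yet visible to any other
    * thread, so calling the _locked variant without taking the mutex is
    * safe here.
    */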
   if (!brw_fence_insert_locked(brw, &sync->fence)) {
      /* FIXME: There exists no way to report a GL error here. If an error
       * occurs, continue silently and hope for the best.
       */
   }
}

static void
brw_gl_client_wait_sync(struct gl_context *ctx, struct gl_sync_object *_sync,
                        GLbitfield flags, GLuint64 timeout)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_gl_sync *sync = (struct brw_gl_sync *) _sync;

   if (brw_fence_client_wait(brw, &sync->fence, timeout))
      sync->gl.StatusFlag = 1;
}

static void
brw_gl_server_wait_sync(struct gl_context *ctx, struct gl_sync_object *_sync,
                        GLbitfield flags, GLuint64 timeout)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_gl_sync *sync = (struct brw_gl_sync *) _sync;

   brw_fence_server_wait(brw, &sync->fence);
}

static void
brw_gl_check_sync(struct gl_context *ctx, struct gl_sync_object *_sync)
{
   struct brw_gl_sync *sync = (struct brw_gl_sync *) _sync;

   if (brw_fence_has_completed(&sync->fence))
      sync->gl.StatusFlag = 1;
}

void
brw_init_syncobj_functions(struct dd_function_table *functions)
{
   functions->NewSyncObject = brw_gl_new_sync;
   functions->DeleteSyncObject = brw_gl_delete_sync;
   functions->FenceSync = brw_gl_fence_sync;
   functions->CheckSync = brw_gl_check_sync;
   functions->ClientWaitSync = brw_gl_client_wait_sync;
   functions->ServerWaitSync = brw_gl_server_wait_sync;
}

static void *
brw_dri_create_fence(__DRIcontext *ctx)
{
   struct brw_context *brw = ctx->driverPrivate;
   struct brw_fence *fence;

   fence = calloc(1, sizeof(*fence));
   if (!fence)
      return NULL;

   brw_fence_init(brw, fence, BRW_FENCE_TYPE_BO_WAIT);

   if (!brw_fence_insert_locked(brw, fence)) {
      brw_fence_finish(fence);
      free(fence);
      return NULL;
   }

   return fence;
}

static void
brw_dri_destroy_fence(__DRIscreen *dri_screen, void *_fence)
{
   struct brw_fence *fence = _fence;

   brw_fence_finish(fence);
   free(fence);
}

static GLboolean
brw_dri_client_wait_sync(__DRIcontext *ctx, void *_fence, unsigned flags,
                         uint64_t timeout)
{
   struct brw_fence *fence = _fence;

   return brw_fence_client_wait(fence->brw, fence, timeout);
}

static void
brw_dri_server_wait_sync(__DRIcontext *ctx, void *_fence, unsigned flags)
{
   struct brw_fence *fence = _fence;

   /* We may be called with a NULL fence as a result of WaitSyncKHR on an
    * EGL_KHR_reusable_sync fence. There is nothing to do in that case.
    */
   if (!fence)
      return;

   brw_fence_server_wait(fence->brw, fence);
}

static unsigned
brw_dri_get_capabilities(__DRIscreen *dri_screen)
{
   struct intel_screen *screen = dri_screen->driverPrivate;
   unsigned caps = 0;

   if (screen->has_exec_fence)
      caps |= __DRI_FENCE_CAP_NATIVE_FD;

   return caps;
}

static void *
brw_dri_create_fence_fd(__DRIcontext *dri_ctx, int fd)
{
   struct brw_context *brw = dri_ctx->driverPrivate;
   struct brw_fence *fence;

   assert(brw->screen->has_exec_fence);

   fence = calloc(1, sizeof(*fence));
   if (!fence)
      return NULL;

   brw_fence_init(brw, fence, BRW_FENCE_TYPE_SYNC_FD);

   if (fd == -1) {
      /* Create an out-fence fd. */
      if (!brw_fence_insert_locked(brw, fence))
         goto fail;
   } else {
      /* Import the sync fd as an in-fence. */
      fence->sync_fd = fd;
   }

   assert(fence->sync_fd != -1);

   return fence;

fail:
   brw_fence_finish(fence);
   free(fence);
   return NULL;
}

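/* Return a dup()ed copy of the fence's sync fd. The caller owns the
 * returned fd; the fence keeps its own fd and still closes it in
 * brw_fence_finish().
 */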
static int
brw_dri_get_fence_fd_locked(struct brw_fence *fence)
{
   assert(fence->type == BRW_FENCE_TYPE_SYNC_FD);
   return dup(fence->sync_fd);
}

static int
brw_dri_get_fence_fd(__DRIscreen *dri_screen, void *_fence)
{
   struct brw_fence *fence = _fence;
   int fd;

   mtx_lock(&fence->mutex);
   fd = brw_dri_get_fence_fd_locked(fence);
   mtx_unlock(&fence->mutex);

   return fd;
}

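/* Entry points for the __DRI2_FENCE loader extension.
 * get_fence_from_cl_event is deliberately left unimplemented, and the
 * native-fd entry points are only usable when brw_dri_get_capabilities()
 * reports __DRI_FENCE_CAP_NATIVE_FD.
 */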
const __DRI2fenceExtension intelFenceExtension = {
   .base = { __DRI2_FENCE, 2 },

   .create_fence = brw_dri_create_fence,
   .destroy_fence = brw_dri_destroy_fence,
   .client_wait_sync = brw_dri_client_wait_sync,
   .server_wait_sync = brw_dri_server_wait_sync,
   .get_fence_from_cl_event = NULL,
   .get_capabilities = brw_dri_get_capabilities,
   .create_fence_fd = brw_dri_create_fence_fd,
   .get_fence_fd = brw_dri_get_fence_fd,
};