/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

/**
 * \file
 * \brief Support for GL_ARB_sync and EGL_KHR_fence_sync.
 *
 * GL_ARB_sync is implemented by flushing the current batchbuffer and keeping
 * a reference on it.  We can then check for completion or wait for completion
 * using the normal buffer object mechanisms.  This does mean that if an
 * application is using many sync objects, it will emit small batchbuffers,
 * which may add up to a significant overhead.  However, in other tests of
 * removing gratuitous batchbuffer syncs in Mesa, this has not appeared to be
 * a significant performance bottleneck.
 */
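
/*
 * For reference, a minimal application-side sketch of the GL_ARB_sync usage
 * that exercises these hooks (illustrative only, not part of this driver):
 *
 *    GLsync s = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
 *    ...
 *    // Wait up to one second (timeout is in nanoseconds), flushing
 *    // pending commands first.
 *    GLenum status = glClientWaitSync(s, GL_SYNC_FLUSH_COMMANDS_BIT,
 *                                     1000000000);
 *    glDeleteSync(s);
 *
 * glFenceSync() lands in intel_gl_fence_sync() below, glClientWaitSync() in
 * intel_gl_client_wait_sync(), and glDeleteSync() in
 * intel_gl_delete_sync_object().
 */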

#include "util/imports.h"

#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_reg.h"

struct intel_fence {
   struct intel_context *intel;
   /** The fence waits for completion of this batch. */
   drm_intel_bo *batch_bo;

   mtx_t mutex;
   bool signalled;
};

struct intel_gl_sync_object {
   struct gl_sync_object Base;
   struct intel_fence fence;
};

static void
intel_fence_finish(struct intel_fence *fence)
{
   if (fence->batch_bo)
      drm_intel_bo_unreference(fence->batch_bo);
}

static void
intel_fence_insert(struct intel_context *intel, struct intel_fence *fence)
{
   assert(!fence->batch_bo);
   assert(!fence->signalled);

   intel_batchbuffer_emit_mi_flush(intel);
   fence->batch_bo = intel->batch.bo;
   drm_intel_bo_reference(fence->batch_bo);
   intel_batchbuffer_flush(intel);
}

static bool
intel_fence_has_completed_locked(struct intel_fence *fence)
{
   if (fence->signalled)
      return true;

   if (fence->batch_bo && !drm_intel_bo_busy(fence->batch_bo)) {
      drm_intel_bo_unreference(fence->batch_bo);
      fence->batch_bo = NULL;
      fence->signalled = true;
      return true;
   }

   return false;
}

static bool
intel_fence_has_completed(struct intel_fence *fence)
{
   bool ret;

   mtx_lock(&fence->mutex);
   ret = intel_fence_has_completed_locked(fence);
   mtx_unlock(&fence->mutex);

   return ret;
}

static bool
intel_fence_client_wait_locked(struct intel_context *intel,
                               struct intel_fence *fence,
                               uint64_t timeout)
{
   if (fence->signalled)
      return true;

   assert(fence->batch_bo);

   /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64-bit timeout and returns
    * immediately for timeouts <= 0.  The best we can do is to clamp the
    * timeout to INT64_MAX.  This reduces the maximum representable timeout
    * from about 584 years (2^64 ns) to about 292 years (2^63 ns), which is
    * unlikely to matter.
    */
   if (timeout > INT64_MAX)
      timeout = INT64_MAX;

   if (drm_intel_gem_bo_wait(fence->batch_bo, timeout) != 0)
      return false;

   fence->signalled = true;
   drm_intel_bo_unreference(fence->batch_bo);
   fence->batch_bo = NULL;

   return true;
}

/**
 * Return true if the fence signals before the timeout expires, or had
 * already signalled.  (This matches the behavior expected from
 * __DRI2fence::client_wait_sync.)
 */
static bool
intel_fence_client_wait(struct intel_context *intel, struct intel_fence *fence,
                        uint64_t timeout)
{
   bool ret;

   mtx_lock(&fence->mutex);
   ret = intel_fence_client_wait_locked(intel, fence, timeout);
   mtx_unlock(&fence->mutex);

   return ret;
}

static void
intel_fence_server_wait(struct intel_context *intel, struct intel_fence *fence)
{
   /* We have nothing to do for WaitSync.  Our GL command stream is
    * sequential, so given that the sync object has already flushed the
    * batchbuffer, any batchbuffers coming after this WaitSync will naturally
    * not execute until the previous one is done.
    */
}

static struct gl_sync_object *
intel_gl_new_sync_object(struct gl_context *ctx)
{
   struct intel_gl_sync_object *sync;

   sync = calloc(1, sizeof(*sync));
   if (!sync)
      return NULL;

   /* The client-wait and check paths lock this mutex, so initialize it here
    * rather than relying on calloc()'s zero-fill, which is not a portable
    * mutex initializer.
    */
   mtx_init(&sync->fence.mutex, mtx_plain);

   return &sync->Base;
}

static void
intel_gl_delete_sync_object(struct gl_context *ctx, struct gl_sync_object *s)
{
   struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;

   intel_fence_finish(&sync->fence);
   free(sync);
}

static void
intel_gl_fence_sync(struct gl_context *ctx, struct gl_sync_object *s,
                    GLenum condition, GLbitfield flags)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;

   intel_fence_insert(intel, &sync->fence);
}

static void
intel_gl_client_wait_sync(struct gl_context *ctx, struct gl_sync_object *s,
                          GLbitfield flags, GLuint64 timeout)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;

   if (intel_fence_client_wait(intel, &sync->fence, timeout))
      s->StatusFlag = 1;
}

static void
intel_gl_server_wait_sync(struct gl_context *ctx, struct gl_sync_object *s,
                          GLbitfield flags, GLuint64 timeout)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;

   intel_fence_server_wait(intel, &sync->fence);
}

static void
intel_gl_check_sync(struct gl_context *ctx, struct gl_sync_object *s)
{
   struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;

   if (intel_fence_has_completed(&sync->fence))
      s->StatusFlag = 1;
}

void
intel_init_syncobj_functions(struct dd_function_table *functions)
{
   functions->NewSyncObject = intel_gl_new_sync_object;
   functions->DeleteSyncObject = intel_gl_delete_sync_object;
   functions->FenceSync = intel_gl_fence_sync;
   functions->CheckSync = intel_gl_check_sync;
   functions->ClientWaitSync = intel_gl_client_wait_sync;
   functions->ServerWaitSync = intel_gl_server_wait_sync;
}

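/*
 * A minimal sketch of how a driver wires these hooks in at context-creation
 * time (shown for illustration only; in this driver the call is made from
 * the InitDriverFunctions path in intel_context.c):
 *
 *    struct dd_function_table functions;
 *
 *    _mesa_init_driver_functions(&functions);
 *    intel_init_syncobj_functions(&functions);
 *
 * Core Mesa then dispatches glFenceSync() and friends through this table.
 */
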
static void *
intel_dri_create_fence(__DRIcontext *ctx)
{
   struct intel_context *intel = ctx->driverPrivate;
   struct intel_fence *fence;

   fence = calloc(1, sizeof(*fence));
   if (!fence)
      return NULL;

   mtx_init(&fence->mutex, mtx_plain);
   fence->intel = intel;
   intel_fence_insert(intel, fence);

   return fence;
}

static void
intel_dri_destroy_fence(__DRIscreen *screen, void *driver_fence)
{
   struct intel_fence *fence = driver_fence;

   intel_fence_finish(fence);
   free(fence);
}

static GLboolean
intel_dri_client_wait_sync(__DRIcontext *ctx, void *driver_fence,
                           unsigned flags, uint64_t timeout)
{
   struct intel_fence *fence = driver_fence;

   return intel_fence_client_wait(fence->intel, fence, timeout);
}

static void
intel_dri_server_wait_sync(__DRIcontext *ctx, void *driver_fence,
                           unsigned flags)
{
   struct intel_fence *fence = driver_fence;

   /* We may be called with a NULL fence as a result of WaitSyncKHR on an
    * EGL_KHR_reusable_sync fence.  There is nothing to do here in that case.
    */
   if (!fence)
      return;

   intel_fence_server_wait(fence->intel, fence);
}

const __DRI2fenceExtension intelFenceExtension = {
   .base = { __DRI2_FENCE, 1 },

   .create_fence = intel_dri_create_fence,
   .destroy_fence = intel_dri_destroy_fence,
   .client_wait_sync = intel_dri_client_wait_sync,
   .server_wait_sync = intel_dri_server_wait_sync,
   .get_fence_from_cl_event = NULL,
};
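
/*
 * For reference, the EGL_KHR_fence_sync usage that reaches the hooks above
 * (illustrative only; the EGL frontend performs these calls on the
 * application's behalf):
 *
 *    EGLSyncKHR sync = eglCreateSyncKHR(dpy, EGL_SYNC_FENCE_KHR, NULL);
 *    ...
 *    eglClientWaitSyncKHR(dpy, sync, EGL_SYNC_FLUSH_COMMANDS_BIT_KHR,
 *                         EGL_FOREVER_KHR);
 *    eglDestroySyncKHR(dpy, sync);
 *
 * eglCreateSyncKHR() ends up in intel_dri_create_fence(),
 * eglClientWaitSyncKHR() in intel_dri_client_wait_sync(), and
 * eglDestroySyncKHR() in intel_dri_destroy_fence().
 */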