src/mesa/drivers/dri/i915/intel_syncobj.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

/**
 * \file
 * \brief Support for GL_ARB_sync and EGL_KHR_fence_sync.
 *
 * GL_ARB_sync is implemented by flushing the current batchbuffer and keeping a
 * reference on it.  We can then check for completion or wait for completion
 * using the normal buffer object mechanisms.  This does mean that if an
 * application is using many sync objects, it will emit small batchbuffers
 * which may end up being a significant overhead.  In other tests of removing
 * gratuitous batchbuffer syncs in Mesa, it hasn't appeared to be a significant
 * performance bottleneck, though.
 */

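/*
 * For orientation, the application-side sequence this file services is
 * roughly the following (a hypothetical usage sketch, not driver code):
 *
 *    GLsync s = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
 *    ... queue more rendering ...
 *    GLenum status = glClientWaitSync(s, GL_SYNC_FLUSH_COMMANDS_BIT,
 *                                     timeout_ns);
 *    glDeleteSync(s);
 *
 * glFenceSync() reaches intel_gl_fence_sync() below, glClientWaitSync()
 * reaches intel_gl_client_wait_sync(), and glDeleteSync() reaches
 * intel_gl_delete_sync_object().
 */
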
#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_reg.h"

struct intel_fence {
   struct intel_context *intel;
   /** The fence waits for completion of this batch. */
   drm_intel_bo *batch_bo;

   /** Guards batch_bo and signalled against concurrent waiters. */
   mtx_t mutex;
   bool signalled;
};

struct intel_gl_sync_object {
   struct gl_sync_object Base;
   struct intel_fence fence;
};

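/* Release the fence's batch buffer reference, if it still holds one, and
 * tear down its mutex.
 */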
static void
intel_fence_finish(struct intel_fence *fence)
{
   if (fence->batch_bo)
      drm_intel_bo_unreference(fence->batch_bo);

   /* Both creation paths initialize the mutex, so release it here. */
   mtx_destroy(&fence->mutex);
}

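/* Flush all rendering queued so far, take a reference on the current batch
 * buffer, and submit it; the fence then tracks that batch's completion.
 */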
static void
intel_fence_insert(struct intel_context *intel, struct intel_fence *fence)
{
   assert(!fence->batch_bo);
   assert(!fence->signalled);

   intel_batchbuffer_emit_mi_flush(intel);
   fence->batch_bo = intel->batch.bo;
   drm_intel_bo_reference(fence->batch_bo);
   intel_batchbuffer_flush(intel);
}

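/* Poll the fence without blocking.  Once the batch is idle, drop the batch
 * reference and latch the signalled flag.  Caller must hold fence->mutex.
 */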
static bool
intel_fence_has_completed_locked(struct intel_fence *fence)
{
   if (fence->signalled)
      return true;

   if (fence->batch_bo && !drm_intel_bo_busy(fence->batch_bo)) {
      drm_intel_bo_unreference(fence->batch_bo);
      fence->batch_bo = NULL;
      fence->signalled = true;
      return true;
   }

   return false;
}

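/* Thread-safe wrapper around intel_fence_has_completed_locked(). */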
static bool
intel_fence_has_completed(struct intel_fence *fence)
{
   bool ret;

   mtx_lock(&fence->mutex);
   ret = intel_fence_has_completed_locked(fence);
   mtx_unlock(&fence->mutex);

   return ret;
}

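/* Block until the fence's batch completes or the timeout (in nanoseconds)
 * expires.  Caller must hold fence->mutex.
 */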
static bool
intel_fence_client_wait_locked(struct intel_context *intel, struct intel_fence *fence,
                               uint64_t timeout)
{
   if (fence->signalled)
      return true;

   assert(fence->batch_bo);

   /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and returns
    * immediately for timeouts <= 0.  The best we can do is to clamp the
    * timeout to INT64_MAX.  This limits the maximum timeout from 584 years to
    * 292 years - likely not a big deal.
    */
   if (timeout > INT64_MAX)
      timeout = INT64_MAX;

   if (drm_intel_gem_bo_wait(fence->batch_bo, timeout) != 0)
      return false;

   fence->signalled = true;
   drm_intel_bo_unreference(fence->batch_bo);
   fence->batch_bo = NULL;

   return true;
}

/**
 * Return true if the fence signals within the timeout or had already
 * signalled.  (This matches the behavior expected from
 * __DRI2fence::client_wait_sync.)
 */
static bool
intel_fence_client_wait(struct intel_context *intel, struct intel_fence *fence,
                        uint64_t timeout)
{
   bool ret;

   mtx_lock(&fence->mutex);
   ret = intel_fence_client_wait_locked(intel, fence, timeout);
   mtx_unlock(&fence->mutex);

   return ret;
}

static void
intel_fence_server_wait(struct intel_context *intel, struct intel_fence *fence)
{
   /* We have nothing to do for WaitSync.  Our GL command stream is sequential,
    * so given that the sync object has already flushed the batchbuffer, any
    * batchbuffers coming after this WaitSync will naturally not occur until
    * the previous one is done.
    */
}

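/*
 * GL_ARB_sync hooks, plugged into the dd_function_table below.
 */

/* Allocate a zeroed sync object; the fence is armed later in
 * intel_gl_fence_sync().
 */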
static struct gl_sync_object *
intel_gl_new_sync_object(struct gl_context *ctx)
{
   struct intel_gl_sync_object *sync;

   sync = calloc(1, sizeof(*sync));
   if (!sync)
      return NULL;

   /* A zeroed mtx_t is not guaranteed to be a valid mutex, so initialize it
    * explicitly; the DRI path does the same in intel_dri_create_fence().
    */
   mtx_init(&sync->fence.mutex, mtx_plain);

   return &sync->Base;
}

static void
intel_gl_delete_sync_object(struct gl_context *ctx, struct gl_sync_object *s)
{
   struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;

   intel_fence_finish(&sync->fence);
   free(sync);
}

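/* Called for glFenceSync(): flush the current batch and start tracking it. */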
static void
intel_gl_fence_sync(struct gl_context *ctx, struct gl_sync_object *s,
                    GLenum condition, GLbitfield flags)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;

   intel_fence_insert(intel, &sync->fence);
}

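/* Called for glClientWaitSync(); core Mesa inspects StatusFlag afterwards
 * to pick the GL return status.
 */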
static void
intel_gl_client_wait_sync(struct gl_context *ctx, struct gl_sync_object *s,
                          GLbitfield flags, GLuint64 timeout)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;

   if (intel_fence_client_wait(intel, &sync->fence, timeout))
      s->StatusFlag = 1;
}

static void
intel_gl_server_wait_sync(struct gl_context *ctx, struct gl_sync_object *s,
                          GLbitfield flags, GLuint64 timeout)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;

   intel_fence_server_wait(intel, &sync->fence);
}

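/* Non-blocking status query, e.g. for glGetSynciv(GL_SYNC_STATUS). */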
static void
intel_gl_check_sync(struct gl_context *ctx, struct gl_sync_object *s)
{
   struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;

   if (intel_fence_has_completed(&sync->fence))
      s->StatusFlag = 1;
}

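/* Plug the GL_ARB_sync hooks into the driver's function table. */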
void
intel_init_syncobj_functions(struct dd_function_table *functions)
{
   functions->NewSyncObject = intel_gl_new_sync_object;
   functions->DeleteSyncObject = intel_gl_delete_sync_object;
   functions->FenceSync = intel_gl_fence_sync;
   functions->CheckSync = intel_gl_check_sync;
   functions->ClientWaitSync = intel_gl_client_wait_sync;
   functions->ServerWaitSync = intel_gl_server_wait_sync;
}

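/*
 * __DRI2fence entry points, used by the EGL loader to implement
 * EGL_KHR_fence_sync.  Unlike the GL path, these fences are created
 * standalone and armed immediately.
 */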
static void *
intel_dri_create_fence(__DRIcontext *ctx)
{
   struct intel_context *intel = ctx->driverPrivate;
   struct intel_fence *fence;

   fence = calloc(1, sizeof(*fence));
   if (!fence)
      return NULL;

   mtx_init(&fence->mutex, mtx_plain);
   fence->intel = intel;
   intel_fence_insert(intel, fence);

   return fence;
}

static void
intel_dri_destroy_fence(__DRIscreen *screen, void *driver_fence)
{
   struct intel_fence *fence = driver_fence;

   intel_fence_finish(fence);
   free(fence);
}

static GLboolean
intel_dri_client_wait_sync(__DRIcontext *ctx, void *driver_fence, unsigned flags,
                           uint64_t timeout)
{
   struct intel_fence *fence = driver_fence;

   return intel_fence_client_wait(fence->intel, fence, timeout);
}

static void
intel_dri_server_wait_sync(__DRIcontext *ctx, void *driver_fence, unsigned flags)
{
   struct intel_fence *fence = driver_fence;

   /* We might be called here with a NULL fence as a result of WaitSyncKHR
    * on an EGL_KHR_reusable_sync fence.  There is nothing to do in that case.
    */
   if (!fence)
      return;

   intel_fence_server_wait(fence->intel, fence);
}

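/* Version 1 of the __DRI2fence interface; CL event interop is not supported,
 * so get_fence_from_cl_event is left NULL.
 */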
const __DRI2fenceExtension intelFenceExtension = {
   .base = { __DRI2_FENCE, 1 },

   .create_fence = intel_dri_create_fence,
   .destroy_fence = intel_dri_destroy_fence,
   .client_wait_sync = intel_dri_client_wait_sync,
   .server_wait_sync = intel_dri_server_wait_sync,
   .get_fence_from_cl_event = NULL,
};