Import texmem i915 driver to its new location as i915tex.
[mesa.git] / src/mesa/drivers/dri/i915tex/intel_batchpool.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <xf86drm.h>
#include <stdlib.h>
#include <errno.h>
#include <sched.h>              /* sched_yield(), used in pool_takedown() */
#include "imports.h"
#include "glthread.h"
#include "dri_bufpool.h"
#include "dri_bufmgr.h"
#include "intel_screen.h"

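/*
 * A BBuf is one fixed-size slice of the pool's single kernel buffer
 * object.  "start" is the byte offset of the slice within that object,
 * and "fence" is the fence the buffer was last submitted with, if any.
 */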
typedef struct
{
   drmMMListHead head;
   struct _BPool *parent;
   struct _DriFenceObject *fence;
   unsigned long start;
   int unfenced;
   int mapped;
} BBuf;

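/*
 * A BPool carves one kernel buffer object into numTot equally sized
 * buffers.  Unused buffers sit on the "free" list; buffers destroyed
 * while still fenced sit on the "delayed" list until their fences have
 * signaled and they can be reclaimed.
 */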
typedef struct _BPool
{
   _glthread_Mutex mutex;
   unsigned long bufSize;
   unsigned poolSize;
   unsigned numFree;
   unsigned numTot;
   unsigned numDelayed;
   unsigned checkDelayed;
   drmMMListHead free;
   drmMMListHead delayed;
   drmMMListHead head;
   drmBO kernelBO;
   void *virtual;
   BBuf *bufs;
} BPool;


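/*
 * Create the pool backing store: a single kernel buffer object of
 * numBufs * bufSize bytes, mapped once to obtain a persistent virtual
 * address, plus one BBuf descriptor per slice, all placed on the free
 * list.
 */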
static BPool *
createBPool(int fd, unsigned long bufSize, unsigned numBufs, unsigned flags,
            unsigned checkDelayed)
{
   BPool *p = (BPool *) malloc(sizeof(*p));
   BBuf *buf;
   int i;

   if (!p)
      return NULL;

   p->bufs = (BBuf *) malloc(numBufs * sizeof(*p->bufs));
   if (!p->bufs) {
      free(p);
      return NULL;
   }

   DRMINITLISTHEAD(&p->free);
   DRMINITLISTHEAD(&p->head);
   DRMINITLISTHEAD(&p->delayed);

   p->numTot = numBufs;
   p->numFree = numBufs;
   p->bufSize = bufSize;
   p->numDelayed = 0;
   p->checkDelayed = checkDelayed;

   _glthread_INIT_MUTEX(p->mutex);

   if (drmBOCreate(fd, 0, numBufs * bufSize, 0, NULL, drm_bo_type_dc,
                   flags, 0, &p->kernelBO)) {
      free(p->bufs);
      free(p);
      return NULL;
   }
   if (drmBOMap(fd, &p->kernelBO, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0,
                &p->virtual)) {
      drmBODestroy(fd, &p->kernelBO);
      free(p->bufs);
      free(p);
      return NULL;
   }

   /*
    * We unmap the buffer so that we can validate it later. Note that this is
    * just a synchronizing operation. The buffer will have a virtual mapping
    * until it is destroyed.
    */

   drmBOUnmap(fd, &p->kernelBO);

   buf = p->bufs;
   for (i = 0; i < numBufs; ++i) {
      buf->parent = p;
      buf->fence = NULL;
      buf->start = i * bufSize;
      buf->mapped = 0;
      buf->unfenced = 0;
      DRMLISTADDTAIL(&buf->head, &p->free);
      buf++;
   }

   return p;
}


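/*
 * Reclaim destroyed buffers whose fences have signaled, moving them from
 * the delayed list back onto the free list.  If "wait" is set, block on a
 * fence so that reclaiming can make progress even when nothing has
 * signaled yet.
 */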
static void
pool_checkFree(BPool * p, int wait)
{
   drmMMListHead *list, *prev;
   BBuf *buf;
   int signaled = 0;
   int i;

   list = p->delayed.next;

   if (p->numDelayed > 3) {
      for (i = 0; i < p->numDelayed; i += 3) {
         list = list->next;
      }
   }

   prev = list->prev;
   for (; list != &p->delayed; list = prev, prev = list->prev) {

      buf = DRMLISTENTRY(BBuf, list, head);

      if (!signaled) {
         if (wait) {
            driFenceFinish(buf->fence, DRM_FENCE_TYPE_EXE, 1);
            signaled = 1;
         }
         else {
            signaled = driFenceSignaled(buf->fence, DRM_FENCE_TYPE_EXE);
         }
      }

      if (!signaled)
         break;

      driFenceUnReference(buf->fence);
      buf->fence = NULL;
      DRMLISTDEL(list);
      p->numDelayed--;
      DRMLISTADD(list, &p->free);
      p->numFree++;
   }
}

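/*
 * Hand out a buffer from the free list.  The requested size is ignored
 * since every buffer in the pool has the same fixed size; only 4096-byte
 * (or unspecified) alignment is supported.  If the free list is empty we
 * wait for delayed buffers to be reclaimed, and fail fatally if none
 * become available.
 */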
static void *
pool_create(struct _DriBufferPool *pool,
            unsigned long size, unsigned flags, unsigned hint,
            unsigned alignment)
{
   BPool *p = (BPool *) pool->data;

   drmMMListHead *item;

   if (alignment && (alignment != 4096))
      return NULL;

   _glthread_LOCK_MUTEX(p->mutex);

   if (p->numFree == 0)
      pool_checkFree(p, GL_TRUE);

   if (p->numFree == 0) {
      fprintf(stderr, "Out of fixed size buffer objects\n");
      BM_CKFATAL(-ENOMEM);
   }

   item = p->free.next;

   if (item == &p->free) {
      fprintf(stderr, "Fixed size buffer pool corruption\n");
   }

   DRMLISTDEL(item);
   --p->numFree;

   _glthread_UNLOCK_MUTEX(p->mutex);
   return (void *) DRMLISTENTRY(BBuf, item, head);
}


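/*
 * Release a buffer back to the pool.  A buffer that is still fenced goes
 * onto the delayed list and is reclaimed later by pool_checkFree();
 * otherwise it returns straight to the free list.
 */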
static int
pool_destroy(struct _DriBufferPool *pool, void *private)
{
   BBuf *buf = (BBuf *) private;
   BPool *p = buf->parent;

   _glthread_LOCK_MUTEX(p->mutex);

   if (buf->fence) {
      DRMLISTADDTAIL(&buf->head, &p->delayed);
      p->numDelayed++;
   }
   else {
      buf->unfenced = 0;
      DRMLISTADD(&buf->head, &p->free);
      p->numFree++;
   }

   if ((p->numDelayed % p->checkDelayed) == 0)
      pool_checkFree(p, 0);

   _glthread_UNLOCK_MUTEX(p->mutex);
   return 0;
}


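/*
 * Return a CPU pointer to the buffer's slice of the permanently mapped
 * kernel buffer object.  Mapping an already mapped buffer is a fatal
 * error, and a buffer that is still fenced cannot be mapped yet.
 */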
static int
pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
         int hint, void **virtual)
{

   BBuf *buf = (BBuf *) private;
   BPool *p = buf->parent;

   _glthread_LOCK_MUTEX(p->mutex);

   /*
    * Currently Mesa doesn't have any condition variables to resolve this
    * cleanly in a multithreading environment.
    * We bail out instead.
    */

   if (buf->mapped) {
      fprintf(stderr, "Trying to map already mapped buffer object\n");
      BM_CKFATAL(-EINVAL);
   }

#if 0
   if (buf->unfenced && !(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
      fprintf(stderr, "Trying to map an unfenced buffer object 0x%08x"
              " 0x%08x %d\n", hint, flags, buf->start);
      BM_CKFATAL(-EINVAL);
   }

#endif

   if (buf->fence) {
      _glthread_UNLOCK_MUTEX(p->mutex);
      return -EBUSY;
   }

   buf->mapped = GL_TRUE;
   *virtual = (unsigned char *) p->virtual + buf->start;
   _glthread_UNLOCK_MUTEX(p->mutex);
   return 0;
}

static int
pool_waitIdle(struct _DriBufferPool *pool, void *private, int lazy)
{
   BBuf *buf = (BBuf *) private;
   driFenceFinish(buf->fence, 0, lazy);
   return 0;
}

static int
pool_unmap(struct _DriBufferPool *pool, void *private)
{
   BBuf *buf = (BBuf *) private;

   buf->mapped = 0;
   return 0;
}

static unsigned long
pool_offset(struct _DriBufferPool *pool, void *private)
{
   BBuf *buf = (BBuf *) private;
   BPool *p = buf->parent;

   return p->kernelBO.offset + buf->start;
}

static unsigned
pool_flags(struct _DriBufferPool *pool, void *private)
{
   BPool *p = (BPool *) pool->data;

   return p->kernelBO.flags;
}

static unsigned long
pool_size(struct _DriBufferPool *pool, void *private)
{
   BPool *p = (BPool *) pool->data;

   return p->bufSize;
}


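/*
 * Attach the fence a buffer was just submitted with, replacing any
 * previously attached fence and clearing the unfenced flag.
 */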
static int
pool_fence(struct _DriBufferPool *pool, void *private,
           struct _DriFenceObject *fence)
{
   BBuf *buf = (BBuf *) private;
   BPool *p = buf->parent;

   _glthread_LOCK_MUTEX(p->mutex);
   if (buf->fence) {
      driFenceUnReference(buf->fence);
   }
   buf->fence = fence;
   buf->unfenced = 0;
   driFenceReference(buf->fence);
   _glthread_UNLOCK_MUTEX(p->mutex);

   return 0;
}

static drmBO *
pool_kernel(struct _DriBufferPool *pool, void *private)
{
   BBuf *buf = (BBuf *) private;
   BPool *p = buf->parent;

   return &p->kernelBO;
}

static int
pool_validate(struct _DriBufferPool *pool, void *private)
{
   BBuf *buf = (BBuf *) private;
   BPool *p = buf->parent;
   _glthread_LOCK_MUTEX(p->mutex);
   buf->unfenced = GL_TRUE;
   _glthread_UNLOCK_MUTEX(p->mutex);
   return 0;
}

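/*
 * Destroy the pool: wait for all delayed buffers to be reclaimed, then
 * destroy the kernel buffer object and free all bookkeeping.
 */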
static void
pool_takedown(struct _DriBufferPool *pool)
{
   BPool *p = (BPool *) pool->data;

   /*
    * Wait on outstanding fences.
    */

   _glthread_LOCK_MUTEX(p->mutex);
   while ((p->numFree < p->numTot) && p->numDelayed) {
      _glthread_UNLOCK_MUTEX(p->mutex);
      sched_yield();
      pool_checkFree(p, GL_TRUE);
      _glthread_LOCK_MUTEX(p->mutex);
   }

   drmBODestroy(pool->fd, &p->kernelBO);
   free(p->bufs);
   _glthread_UNLOCK_MUTEX(p->mutex);
   free(p);
   free(pool);
}


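/*
 * Public entry point: create a pool of numBufs fixed-size batch buffers
 * and hook up the _DriBufferPool function table.
 */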
struct _DriBufferPool *
driBatchPoolInit(int fd, unsigned flags,
                 unsigned long bufSize,
                 unsigned numBufs, unsigned checkDelayed)
{
   struct _DriBufferPool *pool;

   pool = (struct _DriBufferPool *) malloc(sizeof(*pool));
   if (!pool)
      return NULL;

   pool->data = createBPool(fd, bufSize, numBufs, flags, checkDelayed);
   if (!pool->data) {
      free(pool);
      return NULL;
   }

   pool->fd = fd;
   pool->map = &pool_map;
   pool->unmap = &pool_unmap;
   pool->destroy = &pool_destroy;
   pool->offset = &pool_offset;
   pool->flags = &pool_flags;
   pool->size = &pool_size;
   pool->create = &pool_create;
   pool->fence = &pool_fence;
   pool->kernel = &pool_kernel;
   pool->validate = &pool_validate;
   pool->waitIdle = &pool_waitIdle;
   pool->setstatic = NULL;
   pool->takeDown = &pool_takedown;
   return pool;
}
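
/*
 * Minimal usage sketch (illustrative only, not part of this file): how a
 * caller might allocate, map and retire one batch buffer through the
 * function table set up above.  The flag, size and count values are
 * assumptions, as is the origin of "fence"; they are not taken from this
 * file.
 *
 *    struct _DriBufferPool *pool;
 *    void *buf;
 *    void *virtual;
 *
 *    pool = driBatchPoolInit(fd, DRM_BO_FLAG_EXE | DRM_BO_FLAG_MEM_TT,
 *                            16 * 4096, 5, 32);
 *    buf = pool->create(pool, 16 * 4096, 0, 0, 4096);
 *    pool->map(pool, buf, DRM_BO_FLAG_WRITE, 0, &virtual);
 *       ... write commands into "virtual" ...
 *    pool->unmap(pool, buf);
 *    pool->validate(pool, buf);
 *       ... submit the batch, obtain "fence" ...
 *    pool->fence(pool, buf, fence);
 *    pool->destroy(pool, buf);
 */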