/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */

#include <stdint.h>
#include <sys/time.h>
#include <errno.h>
#include <unistd.h>
#include <assert.h>
#include "ws_dri_bufpool.h"
#include "ws_dri_fencemgr.h"
#include "ws_dri_bufmgr.h"
#include "pipe/p_thread.h"

#define DRI_SLABPOOL_ALLOC_RETRIES 100

struct _DriSlab;

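/*
 * A buffer is either sub-allocated from a slab (isSlabBuffer != 0), in
 * which case parent/start locate it inside the slab's kernel BO, or a
 * fallback buffer with its own drmBO (bo != NULL). The event condvar is
 * broadcast when the last mapping goes away and when a fence is attached.
 */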
struct _DriSlabBuffer {
   int isSlabBuffer;
   drmBO *bo;
   struct _DriFenceObject *fence;
   struct _DriSlab *parent;
   drmMMListHead head;
   uint32_t mapCount;
   uint32_t start;
   uint32_t fenceType;
   int unFenced;
   pipe_condvar event;
};

struct _DriKernelBO {
   int fd;
   drmBO bo;
   drmMMListHead timeoutHead;
   drmMMListHead head;
   struct timeval timeFreed;
   uint32_t pageAlignment;
   void *virtual;
};

struct _DriSlab {
   drmMMListHead head;
   drmMMListHead freeBuffers;
   uint32_t numBuffers;
   uint32_t numFree;
   struct _DriSlabBuffer *buffers;
   struct _DriSlabSizeHeader *header;
   struct _DriKernelBO *kbo;
};


struct _DriSlabSizeHeader {
   drmMMListHead slabs;
   drmMMListHead freeSlabs;
   drmMMListHead delayedBuffers;
   uint32_t numDelayed;
   struct _DriSlabPool *slabPool;
   uint32_t bufSize;
   pipe_mutex mutex;
};

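/*
 * Tracks kernel BOs that the pool has released but keeps around for
 * reuse. BOs are bucketed by caching state (cached vs. unCached, keyed
 * on DRM_BO_FLAG_CACHED) and sit on timeoutList until they are either
 * reused by driAllocKernelBO() or reaped once their timeFreed stamp
 * expires.
 */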
struct _DriFreeSlabManager {
   struct timeval slabTimeout;
   struct timeval checkInterval;
   struct timeval nextCheck;
   drmMMListHead timeoutList;
   drmMMListHead unCached;
   drmMMListHead cached;
   pipe_mutex mutex;
};


struct _DriSlabPool {

   /*
    * The data of this structure remains constant after
    * initialization and thus needs no mutex protection.
    */

   struct _DriFreeSlabManager *fMan;
   uint64_t proposedFlags;
   uint64_t validMask;
   uint32_t *bucketSizes;
   uint32_t numBuckets;
   uint32_t pageSize;
   int fd;
   int pageAlignment;
   int maxSlabSize;
   int desiredNumBuffers;
   struct _DriSlabSizeHeader *headers;
};

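/*
 * headers[i] manages buffers of size bucketSizes[i]; driSlabPoolInit()
 * fills the buckets with power-of-two sizes (smallestSize << i), so for
 * example smallestSize = 4096 with four buckets gives 4k, 8k, 16k and
 * 32k buckets.
 */
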
/*
 * FIXME: Perhaps arrange timeout slabs in size buckets for fast
 * retrieval??
 */


static inline int
driTimeAfterEq(struct timeval *arg1, struct timeval *arg2)
{
   return ((arg1->tv_sec > arg2->tv_sec) ||
           ((arg1->tv_sec == arg2->tv_sec) &&
            (arg1->tv_usec >= arg2->tv_usec)));
}

static inline void
driTimeAdd(struct timeval *arg, struct timeval *add)
{
   unsigned int sec;

   arg->tv_sec += add->tv_sec;
   arg->tv_usec += add->tv_usec;
   sec = arg->tv_usec / 1000000;
   arg->tv_sec += sec;
   arg->tv_usec -= sec*1000000;
}
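
/*
 * Worked example for driTimeAdd(): {1 s, 900000 us} + {0 s, 300000 us}
 * first yields tv_usec == 1200000; the carry step above then moves one
 * second over, giving {2 s, 200000 us}.
 */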

static void
driFreeKernelBO(struct _DriKernelBO *kbo)
{
   if (!kbo)
      return;

   (void) drmBOUnreference(kbo->fd, &kbo->bo);
   free(kbo);
}


static void
driFreeTimeoutKBOsLocked(struct _DriFreeSlabManager *fMan,
                         struct timeval *time)
{
   drmMMListHead *list, *next;
   struct _DriKernelBO *kbo;

   if (!driTimeAfterEq(time, &fMan->nextCheck))
      return;

   for (list = fMan->timeoutList.next, next = list->next;
        list != &fMan->timeoutList;
        list = next, next = list->next) {

      kbo = DRMLISTENTRY(struct _DriKernelBO, list, timeoutHead);

      if (!driTimeAfterEq(time, &kbo->timeFreed))
         break;

      DRMLISTDELINIT(&kbo->timeoutHead);
      DRMLISTDELINIT(&kbo->head);
      driFreeKernelBO(kbo);
   }

   fMan->nextCheck = *time;
   driTimeAdd(&fMan->nextCheck, &fMan->checkInterval);
}


/*
 * Add a _DriKernelBO to the free slab manager.
 * This means that it is available for reuse, but if it's not
 * reused in a while, it will be freed.
 */

static void
driSetKernelBOFree(struct _DriFreeSlabManager *fMan,
                   struct _DriKernelBO *kbo)
{
   struct timeval time;

   pipe_mutex_lock(fMan->mutex);
   gettimeofday(&time, NULL);
   driTimeAdd(&time, &fMan->slabTimeout);

   kbo->timeFreed = time;

   if (kbo->bo.flags & DRM_BO_FLAG_CACHED)
      DRMLISTADD(&kbo->head, &fMan->cached);
   else
      DRMLISTADD(&kbo->head, &fMan->unCached);

   DRMLISTADDTAIL(&kbo->timeoutHead, &fMan->timeoutList);
   driFreeTimeoutKBOsLocked(fMan, &time);

   pipe_mutex_unlock(fMan->mutex);
}

/*
 * Get a _DriKernelBO for us to use as storage for a slab.
 */

static struct _DriKernelBO *
driAllocKernelBO(struct _DriSlabSizeHeader *header)
{
   struct _DriSlabPool *slabPool = header->slabPool;
   struct _DriFreeSlabManager *fMan = slabPool->fMan;
   drmMMListHead *list, *next, *head;
   uint32_t size = header->bufSize * slabPool->desiredNumBuffers;
   struct _DriKernelBO *kbo;
   struct _DriKernelBO *kboTmp;
   int ret;

   /*
    * FIXME: We should perhaps allow some variation in slabsize in order
    * to efficiently reuse slabs.
    */

   size = (size <= slabPool->maxSlabSize) ? size : slabPool->maxSlabSize;
   size = (size + slabPool->pageSize - 1) & ~(slabPool->pageSize - 1);
   pipe_mutex_lock(fMan->mutex);

   kbo = NULL;

retry:
   head = (slabPool->proposedFlags & DRM_BO_FLAG_CACHED) ?
      &fMan->cached : &fMan->unCached;

   for (list = head->next, next = list->next;
        list != head;
        list = next, next = list->next) {

      kboTmp = DRMLISTENTRY(struct _DriKernelBO, list, head);

      if ((kboTmp->bo.size == size) &&
          (slabPool->pageAlignment == 0 ||
           (kboTmp->pageAlignment % slabPool->pageAlignment) == 0)) {

         if (!kbo)
            kbo = kboTmp;

         if ((kbo->bo.proposedFlags ^ slabPool->proposedFlags) == 0)
            break;
      }
   }

   if (kbo) {
      DRMLISTDELINIT(&kbo->head);
      DRMLISTDELINIT(&kbo->timeoutHead);
   }

   pipe_mutex_unlock(fMan->mutex);

   if (kbo) {
      uint64_t new_mask = kbo->bo.proposedFlags ^ slabPool->proposedFlags;

      ret = 0;
      if (new_mask) {
         ret = drmBOSetStatus(kbo->fd, &kbo->bo, slabPool->proposedFlags,
                              new_mask, DRM_BO_HINT_DONT_FENCE, 0, 0);
      }
      if (ret == 0)
         return kbo;

      driFreeKernelBO(kbo);
      kbo = NULL;
      goto retry;
   }

   kbo = calloc(1, sizeof(struct _DriKernelBO));
   if (!kbo)
      return NULL;

   kbo->fd = slabPool->fd;
   DRMINITLISTHEAD(&kbo->head);
   DRMINITLISTHEAD(&kbo->timeoutHead);
   ret = drmBOCreate(kbo->fd, size, slabPool->pageAlignment, NULL,
                     slabPool->proposedFlags,
                     DRM_BO_HINT_DONT_FENCE, &kbo->bo);
   if (ret)
      goto out_err0;

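   /*
    * Map the BO once so that its virtual address is cached in
    * kbo->virtual; slab buffers later hand out offsets into this
    * mapping without remapping per buffer.
    */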
   ret = drmBOMap(kbo->fd, &kbo->bo,
                  DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
                  0, &kbo->virtual);
   if (ret)
      goto out_err1;

   ret = drmBOUnmap(kbo->fd, &kbo->bo);
   if (ret)
      goto out_err1;

   return kbo;

out_err1:
   drmBOUnreference(kbo->fd, &kbo->bo);
out_err0:
   free(kbo);
   return NULL;
}


static int
driAllocSlab(struct _DriSlabSizeHeader *header)
{
   struct _DriSlab *slab;
   struct _DriSlabBuffer *buf;
   uint32_t numBuffers;
   int ret;
   int i;

   slab = calloc(1, sizeof(*slab));
   if (!slab)
      return -ENOMEM;

   slab->kbo = driAllocKernelBO(header);
   if (!slab->kbo) {
      ret = -ENOMEM;
      goto out_err0;
   }

   numBuffers = slab->kbo->bo.size / header->bufSize;

   slab->buffers = calloc(numBuffers, sizeof(*slab->buffers));
   if (!slab->buffers) {
      ret = -ENOMEM;
      goto out_err1;
   }

   DRMINITLISTHEAD(&slab->head);
   DRMINITLISTHEAD(&slab->freeBuffers);
   slab->numBuffers = numBuffers;
   slab->numFree = 0;
   slab->header = header;

   buf = slab->buffers;
   for (i=0; i < numBuffers; ++i) {
      buf->parent = slab;
      buf->start = i * header->bufSize;
      buf->mapCount = 0;
      buf->isSlabBuffer = 1;
      pipe_condvar_init(buf->event);
      DRMLISTADDTAIL(&buf->head, &slab->freeBuffers);
      slab->numFree++;
      buf++;
   }

   DRMLISTADDTAIL(&slab->head, &header->slabs);

   return 0;

out_err1:
   driSetKernelBOFree(header->slabPool->fMan, slab->kbo);
   free(slab->buffers);
out_err0:
   free(slab);
   return ret;
}

/*
 * Delete a buffer from the slab header delayed list and put
 * it on the slab free list.
 */

static void
driSlabFreeBufferLocked(struct _DriSlabBuffer *buf)
{
   struct _DriSlab *slab = buf->parent;
   struct _DriSlabSizeHeader *header = slab->header;
   drmMMListHead *list = &buf->head;

   DRMLISTDEL(list);
   DRMLISTADDTAIL(list, &slab->freeBuffers);
   slab->numFree++;

   if (slab->head.next == &slab->head)
      DRMLISTADDTAIL(&slab->head, &header->slabs);

   if (slab->numFree == slab->numBuffers) {
      list = &slab->head;
      DRMLISTDEL(list);
      DRMLISTADDTAIL(list, &header->freeSlabs);
   }

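   /*
    * Release all fully free slabs to the free slab manager, except when
    * this free just emptied a slab while other slabs still hold free
    * buffers; in that case the empty slab is cached for reuse.
    */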
   if (header->slabs.next == &header->slabs ||
       slab->numFree != slab->numBuffers) {

      drmMMListHead *next;
      struct _DriFreeSlabManager *fMan = header->slabPool->fMan;

      for (list = header->freeSlabs.next, next = list->next;
           list != &header->freeSlabs;
           list = next, next = list->next) {

         slab = DRMLISTENTRY(struct _DriSlab, list, head);

         DRMLISTDELINIT(list);
         driSetKernelBOFree(fMan, slab->kbo);
         free(slab->buffers);
         free(slab);
      }
   }
}

static void
driSlabCheckFreeLocked(struct _DriSlabSizeHeader *header, int wait)
{
   drmMMListHead *list, *prev, *first;
   struct _DriSlabBuffer *buf;
   struct _DriSlab *slab;
   int firstWasSignaled = 1;
   int signaled;
   int i;
   int ret;

   /*
    * Rerun the freeing test if the youngest tested buffer
    * was signaled, since there might be more idle buffers
    * in the delay list.
    */

   while (firstWasSignaled) {
      firstWasSignaled = 0;
      signaled = 0;
      first = header->delayedBuffers.next;

      /* Only examine the oldest 1/3 of delayed buffers:
       */
      if (header->numDelayed > 3) {
         for (i = 0; i < header->numDelayed; i += 3) {
            first = first->next;
         }
      }

      for (list = first, prev = list->prev;
           list != &header->delayedBuffers;
           list = prev, prev = list->prev) {
         buf = DRMLISTENTRY(struct _DriSlabBuffer, list, head);
         slab = buf->parent;

         if (!signaled) {
            if (wait) {
               ret = driFenceFinish(buf->fence, buf->fenceType, 0);
               if (ret)
                  break;
               signaled = 1;
               wait = 0;
            } else {
               signaled = driFenceSignaled(buf->fence, buf->fenceType);
            }
            if (signaled) {
               if (list == first)
                  firstWasSignaled = 1;
               driFenceUnReference(&buf->fence);
               header->numDelayed--;
               driSlabFreeBufferLocked(buf);
            }
         } else if (driFenceSignaledCached(buf->fence, buf->fenceType)) {
            driFenceUnReference(&buf->fence);
            header->numDelayed--;
            driSlabFreeBufferLocked(buf);
         }
      }
   }
}


static struct _DriSlabBuffer *
driSlabAllocBuffer(struct _DriSlabSizeHeader *header)
{
   struct _DriSlabBuffer *buf;
   struct _DriSlab *slab;
   drmMMListHead *list;
   int count = DRI_SLABPOOL_ALLOC_RETRIES;

   pipe_mutex_lock(header->mutex);
   while (header->slabs.next == &header->slabs && count > 0) {
      driSlabCheckFreeLocked(header, 0);
      if (header->slabs.next != &header->slabs)
         break;

      pipe_mutex_unlock(header->mutex);
      if (count != DRI_SLABPOOL_ALLOC_RETRIES)
         usleep(1);
      pipe_mutex_lock(header->mutex);
      (void) driAllocSlab(header);
      count--;
   }

   list = header->slabs.next;
   if (list == &header->slabs) {
      pipe_mutex_unlock(header->mutex);
      return NULL;
   }
   slab = DRMLISTENTRY(struct _DriSlab, list, head);
   if (--slab->numFree == 0)
      DRMLISTDELINIT(list);

   list = slab->freeBuffers.next;
   DRMLISTDELINIT(list);

   pipe_mutex_unlock(header->mutex);
   buf = DRMLISTENTRY(struct _DriSlabBuffer, list, head);
   return buf;
}

static void *
pool_create(struct _DriBufferPool *driPool, unsigned long size,
            uint64_t flags, unsigned hint, unsigned alignment)
{
   struct _DriSlabPool *pool = (struct _DriSlabPool *) driPool->data;
   struct _DriSlabSizeHeader *header;
   struct _DriSlabBuffer *buf;
   void *dummy;
   int i;
   int ret;

   /*
    * FIXME: Check for compatibility.
    */

   header = pool->headers;
   for (i=0; i<pool->numBuckets; ++i) {
      if (header->bufSize >= size)
         break;
      header++;
   }

   if (i < pool->numBuckets)
      return driSlabAllocBuffer(header);

   /*
    * Fall back to allocating a buffer object directly from DRM
    * and wrapping it in a driBO structure.
    */

   buf = calloc(1, sizeof(*buf));
   if (!buf)
      return NULL;

   buf->bo = calloc(1, sizeof(*buf->bo));
   if (!buf->bo)
      goto out_err0;

   if (alignment) {
      if ((alignment < pool->pageSize) && (pool->pageSize % alignment))
         goto out_err1;
      if ((alignment > pool->pageSize) && (alignment % pool->pageSize))
         goto out_err1;
   }

   ret = drmBOCreate(pool->fd, size, alignment / pool->pageSize, NULL,
                     flags, hint, buf->bo);
   if (ret)
      goto out_err1;

   ret = drmBOMap(pool->fd, buf->bo, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
                  0, &dummy);
   if (ret)
      goto out_err2;

   ret = drmBOUnmap(pool->fd, buf->bo);
   if (ret)
      goto out_err2;

   return buf;
out_err2:
   drmBOUnreference(pool->fd, buf->bo);
out_err1:
   free(buf->bo);
out_err0:
   free(buf);
   return NULL;
}

static int
pool_destroy(struct _DriBufferPool *driPool, void *private)
{
   struct _DriSlabBuffer *buf =
      (struct _DriSlabBuffer *) private;
   struct _DriSlab *slab;
   struct _DriSlabSizeHeader *header;

   if (!buf->isSlabBuffer) {
      struct _DriSlabPool *pool = (struct _DriSlabPool *) driPool->data;
      int ret;

      ret = drmBOUnreference(pool->fd, buf->bo);
      free(buf->bo);
      free(buf);
      return ret;
   }

   slab = buf->parent;
   header = slab->header;

   pipe_mutex_lock(header->mutex);
   buf->unFenced = 0;
   buf->mapCount = 0;

   if (buf->fence && !driFenceSignaledCached(buf->fence, buf->fenceType)) {
      DRMLISTADDTAIL(&buf->head, &header->delayedBuffers);
      header->numDelayed++;
   } else {
      if (buf->fence)
         driFenceUnReference(&buf->fence);
      driSlabFreeBufferLocked(buf);
   }

   pipe_mutex_unlock(header->mutex);
   return 0;
}

static int
pool_waitIdle(struct _DriBufferPool *driPool, void *private,
              pipe_mutex *mutex, int lazy)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;

   while (buf->unFenced)
      pipe_condvar_wait(buf->event, *mutex);

   if (!buf->fence)
      return 0;

   driFenceFinish(buf->fence, buf->fenceType, lazy);
   driFenceUnReference(&buf->fence);

   return 0;
}

static int
pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
         int hint, pipe_mutex *mutex, void **virtual)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
   int busy;

   if (buf->isSlabBuffer)
      busy = buf->unFenced ||
         (buf->fence && !driFenceSignaledCached(buf->fence, buf->fenceType));
   else
      busy = buf->fence && !driFenceSignaled(buf->fence, buf->fenceType);

   if (busy) {
      if (hint & DRM_BO_HINT_DONT_BLOCK)
         return -EBUSY;
      else {
         (void) pool_waitIdle(pool, private, mutex, 0);
      }
   }

   ++buf->mapCount;
   *virtual = (buf->isSlabBuffer) ?
      (void *) ((uint8_t *) buf->parent->kbo->virtual + buf->start) :
      (void *) buf->bo->virtual;

   return 0;
}

static int
pool_unmap(struct _DriBufferPool *pool, void *private)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;

   --buf->mapCount;
   if (buf->mapCount == 0 && buf->isSlabBuffer)
      pipe_condvar_broadcast(buf->event);

   return 0;
}

static unsigned long
pool_offset(struct _DriBufferPool *pool, void *private)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
   struct _DriSlab *slab;
   struct _DriSlabSizeHeader *header;

   if (!buf->isSlabBuffer) {
      assert(buf->bo->proposedFlags & DRM_BO_FLAG_NO_MOVE);
      return buf->bo->offset;
   }

   slab = buf->parent;
   header = slab->header;

   (void) header;
   assert(header->slabPool->proposedFlags & DRM_BO_FLAG_NO_MOVE);
   return slab->kbo->bo.offset + buf->start;
}

static unsigned long
pool_poolOffset(struct _DriBufferPool *pool, void *private)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;

   return buf->start;
}

static uint64_t
pool_flags(struct _DriBufferPool *pool, void *private)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;

   if (!buf->isSlabBuffer)
      return buf->bo->flags;

   return buf->parent->kbo->bo.flags;
}

static unsigned long
pool_size(struct _DriBufferPool *pool, void *private)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;

   if (!buf->isSlabBuffer)
      return buf->bo->size;

   return buf->parent->header->bufSize;
}

static int
pool_fence(struct _DriBufferPool *pool, void *private,
           struct _DriFenceObject *fence)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
   drmBO *bo;

   if (buf->fence)
      driFenceUnReference(&buf->fence);

   buf->fence = driFenceReference(fence);
   bo = (buf->isSlabBuffer) ?
      &buf->parent->kbo->bo : buf->bo;
   buf->fenceType = bo->fenceFlags;

   buf->unFenced = 0;
   pipe_condvar_broadcast(buf->event);

   return 0;
}

static drmBO *
pool_kernel(struct _DriBufferPool *pool, void *private)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;

   return (buf->isSlabBuffer) ? &buf->parent->kbo->bo : buf->bo;
}

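/*
 * Validation handshake: pool_validate() waits for all CPU mappings to go
 * away and then marks the buffer unFenced, which blocks new mappings in
 * pool_map() and waiters in pool_waitIdle(); pool_fence() later attaches
 * the fence, clears unFenced and broadcasts buf->event to wake them up.
 */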
static int
pool_validate(struct _DriBufferPool *pool, void *private,
              pipe_mutex *mutex)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;

   if (!buf->isSlabBuffer)
      return 0;

   while (buf->mapCount != 0)
      pipe_condvar_wait(buf->event, *mutex);

   buf->unFenced = 1;
   return 0;
}


struct _DriFreeSlabManager *
driInitFreeSlabManager(uint32_t checkIntervalMsec, uint32_t slabTimeoutMsec)
{
   struct _DriFreeSlabManager *tmp;

   tmp = calloc(1, sizeof(*tmp));
   if (!tmp)
      return NULL;

   pipe_mutex_init(tmp->mutex);
   pipe_mutex_lock(tmp->mutex);
   tmp->slabTimeout.tv_usec = slabTimeoutMsec*1000;
   tmp->slabTimeout.tv_sec = tmp->slabTimeout.tv_usec / 1000000;
   tmp->slabTimeout.tv_usec -= tmp->slabTimeout.tv_sec*1000000;

   tmp->checkInterval.tv_usec = checkIntervalMsec*1000;
   tmp->checkInterval.tv_sec = tmp->checkInterval.tv_usec / 1000000;
   tmp->checkInterval.tv_usec -= tmp->checkInterval.tv_sec*1000000;

   gettimeofday(&tmp->nextCheck, NULL);
   driTimeAdd(&tmp->nextCheck, &tmp->checkInterval);
   DRMINITLISTHEAD(&tmp->timeoutList);
   DRMINITLISTHEAD(&tmp->unCached);
   DRMINITLISTHEAD(&tmp->cached);
   pipe_mutex_unlock(tmp->mutex);

   return tmp;
}

void
driFinishFreeSlabManager(struct _DriFreeSlabManager *fMan)
{
   struct timeval time;

   time = fMan->nextCheck;
   driTimeAdd(&time, &fMan->checkInterval);

   pipe_mutex_lock(fMan->mutex);
   driFreeTimeoutKBOsLocked(fMan, &time);
   pipe_mutex_unlock(fMan->mutex);

   assert(fMan->timeoutList.next == &fMan->timeoutList);
   assert(fMan->unCached.next == &fMan->unCached);
   assert(fMan->cached.next == &fMan->cached);

   free(fMan);
}

static void
driInitSizeHeader(struct _DriSlabPool *pool, uint32_t size,
                  struct _DriSlabSizeHeader *header)
{
   pipe_mutex_init(header->mutex);
   pipe_mutex_lock(header->mutex);

   DRMINITLISTHEAD(&header->slabs);
   DRMINITLISTHEAD(&header->freeSlabs);
   DRMINITLISTHEAD(&header->delayedBuffers);

   header->numDelayed = 0;
   header->slabPool = pool;
   header->bufSize = size;

   pipe_mutex_unlock(header->mutex);
}

static void
driFinishSizeHeader(struct _DriSlabSizeHeader *header)
{
   drmMMListHead *list, *next;
   struct _DriSlabBuffer *buf;

   pipe_mutex_lock(header->mutex);
   for (list = header->delayedBuffers.next, next = list->next;
        list != &header->delayedBuffers;
        list = next, next = list->next) {

      buf = DRMLISTENTRY(struct _DriSlabBuffer, list, head);
      if (buf->fence) {
         (void) driFenceFinish(buf->fence, buf->fenceType, 0);
         driFenceUnReference(&buf->fence);
      }
      header->numDelayed--;
      driSlabFreeBufferLocked(buf);
   }
   pipe_mutex_unlock(header->mutex);
}

static void
pool_takedown(struct _DriBufferPool *driPool)
{
   struct _DriSlabPool *pool = driPool->data;
   int i;

   for (i=0; i<pool->numBuckets; ++i) {
      driFinishSizeHeader(&pool->headers[i]);
   }

   free(pool->headers);
   free(pool->bucketSizes);
   free(pool);
   free(driPool);
}

struct _DriBufferPool *
driSlabPoolInit(int fd, uint64_t flags,
                uint64_t validMask,
                uint32_t smallestSize,
                uint32_t numSizes,
                uint32_t desiredNumBuffers,
                uint32_t maxSlabSize,
                uint32_t pageAlignment,
                struct _DriFreeSlabManager *fMan)
{
   struct _DriBufferPool *driPool;
   struct _DriSlabPool *pool;
   uint32_t i;

   driPool = calloc(1, sizeof(*driPool));
   if (!driPool)
      return NULL;

   pool = calloc(1, sizeof(*pool));
   if (!pool)
      goto out_err0;

   pool->bucketSizes = calloc(numSizes, sizeof(*pool->bucketSizes));
   if (!pool->bucketSizes)
      goto out_err1;

   pool->headers = calloc(numSizes, sizeof(*pool->headers));
   if (!pool->headers)
      goto out_err2;

   pool->fMan = fMan;
   pool->proposedFlags = flags;
   pool->validMask = validMask;
   pool->numBuckets = numSizes;
   pool->pageSize = getpagesize();
   pool->fd = fd;
   pool->pageAlignment = pageAlignment;
   pool->maxSlabSize = maxSlabSize;
   pool->desiredNumBuffers = desiredNumBuffers;

   for (i=0; i<pool->numBuckets; ++i) {
      pool->bucketSizes[i] = (smallestSize << i);
      driInitSizeHeader(pool, pool->bucketSizes[i],
                        &pool->headers[i]);
   }

   driPool->data = (void *) pool;
   driPool->map = &pool_map;
   driPool->unmap = &pool_unmap;
   driPool->destroy = &pool_destroy;
   driPool->offset = &pool_offset;
   driPool->poolOffset = &pool_poolOffset;
   driPool->flags = &pool_flags;
   driPool->size = &pool_size;
   driPool->create = &pool_create;
   driPool->fence = &pool_fence;
   driPool->kernel = &pool_kernel;
   driPool->validate = &pool_validate;
   driPool->waitIdle = &pool_waitIdle;
   driPool->takeDown = &pool_takedown;

   return driPool;

out_err2:
   free(pool->bucketSizes);
out_err1:
   free(pool);
out_err0:
   free(driPool);

   return NULL;
}
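
/*
 * Usage sketch (illustrative only; the flag combination and sizes below
 * are assumptions for this example, not requirements of the pool):
 *
 *    struct _DriFreeSlabManager *fMan =
 *       driInitFreeSlabManager(100, 1000);     check every 100 ms,
 *                                              reap free slabs after 1 s
 *    struct _DriBufferPool *pool =
 *       driSlabPoolInit(fd,
 *                       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
 *                       DRM_BO_FLAG_MEM_TT,    proposedFlags
 *                       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
 *                       DRM_BO_FLAG_MEM_TT,    validMask
 *                       4096,                  smallest bucket size
 *                       4,                     buckets: 4k 8k 16k 32k
 *                       16,                    desired buffers per slab
 *                       16 * 4096,             max slab size in bytes
 *                       0,                     page alignment (0 = default)
 *                       fMan);
 *
 * Buffers are then allocated through pool->create() and released through
 * pool->destroy(); pool->takeDown(pool) and driFinishFreeSlabManager(fMan)
 * tear everything down.
 */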