1 /**************************************************************************
3 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
18 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
27 **************************************************************************/
29 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
37 #include "ws_dri_bufpool.h"
38 #include "ws_dri_fencemgr.h"
39 #include "ws_dri_bufmgr.h"
40 #include "pipe/p_thread.h"
42 #define DRI_SLABPOOL_ALLOC_RETRIES 100
46 struct _DriSlabBuffer
{
49 struct _DriFenceObject
*fence
;
50 struct _DriSlab
*parent
;
62 drmMMListHead timeoutHead
;
64 struct timeval timeFreed
;
65 uint32_t pageAlignment
;
71 drmMMListHead freeBuffers
;
74 struct _DriSlabBuffer
*buffers
;
75 struct _DriSlabSizeHeader
*header
;
76 struct _DriKernelBO
*kbo
;
80 struct _DriSlabSizeHeader
{
82 drmMMListHead freeSlabs
;
83 drmMMListHead delayedBuffers
;
85 struct _DriSlabPool
*slabPool
;
90 struct _DriFreeSlabManager
{
91 struct timeval slabTimeout
;
92 struct timeval checkInterval
;
93 struct timeval nextCheck
;
94 drmMMListHead timeoutList
;
95 drmMMListHead unCached
;
101 struct _DriSlabPool
{
104 * The data of this structure remains constant after
105 * initialization and thus needs no mutex protection.
108 struct _DriFreeSlabManager
*fMan
;
109 uint64_t proposedFlags
;
111 uint32_t *bucketSizes
;
117 int desiredNumBuffers
;
118 struct _DriSlabSizeHeader
*headers
;
122 * FIXME: Perhaps arrange timeout slabs in size buckets for fast
/**
 * driTimeAfterEq - compare two struct timeval timestamps.
 *
 * Returns non-zero if *arg1 is later than *arg2.
 *
 * NOTE: despite the "Eq" in the name, two equal timestamps compare as 0
 * (strictly-after semantics on the microsecond field). Callers such as the
 * timeout scan rely on this exact behavior, so it is preserved as-is.
 *
 * NOTE(review): the declaration line was lost in extraction; the
 * reconstructed `static int` signature should be confirmed upstream.
 */
static int
driTimeAfterEq(struct timeval *arg1, struct timeval *arg2)
{
   return ((arg1->tv_sec > arg2->tv_sec) ||
           ((arg1->tv_sec == arg2->tv_sec) &&
            (arg1->tv_usec > arg2->tv_usec)));
}
/**
 * driTimeAdd - add *add to *arg in place, normalizing the result.
 *
 * After the raw field-wise addition, any whole seconds accumulated in
 * tv_usec are carried into tv_sec so that tv_usec stays in [0, 1000000)
 * for non-negative inputs.
 *
 * Fix: the code as extracted computed the carry (`sec`) and subtracted
 * `sec * 1000000` from tv_usec but never added `sec` to tv_sec, which
 * would silently drop whole seconds on every overflow. The carry into
 * tv_sec is restored here — it is required for the subtraction to be a
 * normalization rather than a loss of time.
 */
static void
driTimeAdd(struct timeval *arg, struct timeval *add)
{
   long sec;

   arg->tv_sec += add->tv_sec;
   arg->tv_usec += add->tv_usec;

   /* Carry whole seconds out of the microsecond field. */
   sec = arg->tv_usec / 1000000;
   arg->tv_sec += sec;
   arg->tv_usec -= sec * 1000000;
}
/*
 * driFreeKernelBO - release a slab's backing kernel buffer object.
 *
 * NOTE(review): this span is a garbled extraction; original source lines
 * 149-152 and 154+ are not visible here. They presumably hold the opening
 * brace, a NULL guard, and the free() of the wrapper struct — recover them
 * from the upstream file before relying on this text.
 */
148 driFreeKernelBO(struct _DriKernelBO
*kbo
)
/* Drop the DRM reference on the kernel object; the return value is
 * deliberately discarded (nothing useful can be done on failure here). */
153 (void) drmBOUnreference(kbo
->fd
, &kbo
->bo
);
159 driFreeTimeoutKBOsLocked(struct _DriFreeSlabManager
*fMan
,
160 struct timeval
*time
)
162 drmMMListHead
*list
, *next
;
163 struct _DriKernelBO
*kbo
;
165 if (!driTimeAfterEq(time
, &fMan
->nextCheck
))
168 for (list
= fMan
->timeoutList
.next
, next
= list
->next
;
169 list
!= &fMan
->timeoutList
;
170 list
= next
, next
= list
->next
) {
172 kbo
= DRMLISTENTRY(struct _DriKernelBO
, list
, timeoutHead
);
174 if (!driTimeAfterEq(time
, &kbo
->timeFreed
))
177 DRMLISTDELINIT(&kbo
->timeoutHead
);
178 DRMLISTDELINIT(&kbo
->head
);
179 driFreeKernelBO(kbo
);
182 fMan
->nextCheck
= *time
;
183 driTimeAdd(&fMan
->nextCheck
, &fMan
->checkInterval
);
188 * Add a _DriKernelBO to the free slab manager.
189 * This means that it is available for reuse, but if it's not
190 * reused in a while, it will be freed.
194 driSetKernelBOFree(struct _DriFreeSlabManager
*fMan
,
195 struct _DriKernelBO
*kbo
)
199 pipe_mutex_lock(fMan
->mutex
);
200 gettimeofday(&time
, NULL
);
201 driTimeAdd(&time
, &fMan
->slabTimeout
);
203 kbo
->timeFreed
= time
;
205 if (kbo
->bo
.flags
& DRM_BO_FLAG_CACHED
)
206 DRMLISTADD(&kbo
->head
, &fMan
->cached
);
208 DRMLISTADD(&kbo
->head
, &fMan
->unCached
);
210 DRMLISTADDTAIL(&kbo
->timeoutHead
, &fMan
->timeoutList
);
211 driFreeTimeoutKBOsLocked(fMan
, &time
);
213 pipe_mutex_unlock(fMan
->mutex
);
217 * Get a _DriKernelBO for us to use as storage for a slab.
221 static struct _DriKernelBO
*
222 driAllocKernelBO(struct _DriSlabSizeHeader
*header
)
225 struct _DriSlabPool
*slabPool
= header
->slabPool
;
226 struct _DriFreeSlabManager
*fMan
= slabPool
->fMan
;
227 drmMMListHead
*list
, *next
, *head
;
228 uint32_t size
= header
->bufSize
* slabPool
->desiredNumBuffers
;
229 struct _DriKernelBO
*kbo
;
230 struct _DriKernelBO
*kboTmp
;
234 * FIXME: We should perhaps allow some variation in slabsize in order
235 * to efficiently reuse slabs.
238 size
= (size
<= slabPool
->maxSlabSize
) ? size
: slabPool
->maxSlabSize
;
239 size
= (size
+ slabPool
->pageSize
- 1) & ~(slabPool
->pageSize
- 1);
240 pipe_mutex_lock(fMan
->mutex
);
245 head
= (slabPool
->proposedFlags
& DRM_BO_FLAG_CACHED
) ?
246 &fMan
->cached
: &fMan
->unCached
;
248 for (list
= head
->next
, next
= list
->next
;
250 list
= next
, next
= list
->next
) {
252 kboTmp
= DRMLISTENTRY(struct _DriKernelBO
, list
, head
);
254 if ((kboTmp
->bo
.size
== size
) &&
255 (slabPool
->pageAlignment
== 0 ||
256 (kboTmp
->pageAlignment
% slabPool
->pageAlignment
) == 0)) {
261 if ((kbo
->bo
.proposedFlags
^ slabPool
->proposedFlags
) == 0)
268 DRMLISTDELINIT(&kbo
->head
);
269 DRMLISTDELINIT(&kbo
->timeoutHead
);
272 pipe_mutex_unlock(fMan
->mutex
);
275 uint64_t new_mask
= kbo
->bo
.proposedFlags
^ slabPool
->proposedFlags
;
279 ret
= drmBOSetStatus(kbo
->fd
, &kbo
->bo
, slabPool
->proposedFlags
,
280 new_mask
, DRM_BO_HINT_DONT_FENCE
, 0, 0);
285 driFreeKernelBO(kbo
);
290 kbo
= calloc(1, sizeof(struct _DriKernelBO
));
294 kbo
->fd
= slabPool
->fd
;
295 DRMINITLISTHEAD(&kbo
->head
);
296 DRMINITLISTHEAD(&kbo
->timeoutHead
);
297 ret
= drmBOCreate(kbo
->fd
, size
, slabPool
->pageAlignment
, NULL
,
298 slabPool
->proposedFlags
,
299 DRM_BO_HINT_DONT_FENCE
, &kbo
->bo
);
303 ret
= drmBOMap(kbo
->fd
, &kbo
->bo
,
304 DRM_BO_FLAG_READ
| DRM_BO_FLAG_WRITE
,
310 ret
= drmBOUnmap(kbo
->fd
, &kbo
->bo
);
317 drmBOUnreference(kbo
->fd
, &kbo
->bo
);
325 driAllocSlab(struct _DriSlabSizeHeader
*header
)
327 struct _DriSlab
*slab
;
328 struct _DriSlabBuffer
*buf
;
333 slab
= calloc(1, sizeof(*slab
));
337 slab
->kbo
= driAllocKernelBO(header
);
343 numBuffers
= slab
->kbo
->bo
.size
/ header
->bufSize
;
345 slab
->buffers
= calloc(numBuffers
, sizeof(*slab
->buffers
));
346 if (!slab
->buffers
) {
351 DRMINITLISTHEAD(&slab
->head
);
352 DRMINITLISTHEAD(&slab
->freeBuffers
);
353 slab
->numBuffers
= numBuffers
;
355 slab
->header
= header
;
358 for (i
=0; i
< numBuffers
; ++i
) {
360 buf
->start
= i
* header
->bufSize
;
362 buf
->isSlabBuffer
= 1;
363 pipe_condvar_init(buf
->event
);
364 DRMLISTADDTAIL(&buf
->head
, &slab
->freeBuffers
);
369 DRMLISTADDTAIL(&slab
->head
, &header
->slabs
);
374 driSetKernelBOFree(header
->slabPool
->fMan
, slab
->kbo
);
382 * Delete a buffer from the slab header delayed list and put
383 * it on the slab free list.
387 driSlabFreeBufferLocked(struct _DriSlabBuffer
*buf
)
389 struct _DriSlab
*slab
= buf
->parent
;
390 struct _DriSlabSizeHeader
*header
= slab
->header
;
391 drmMMListHead
*list
= &buf
->head
;
394 DRMLISTADDTAIL(list
, &slab
->freeBuffers
);
397 if (slab
->head
.next
== &slab
->head
)
398 DRMLISTADDTAIL(&slab
->head
, &header
->slabs
);
400 if (slab
->numFree
== slab
->numBuffers
) {
403 DRMLISTADDTAIL(list
, &header
->freeSlabs
);
406 if (header
->slabs
.next
== &header
->slabs
||
407 slab
->numFree
!= slab
->numBuffers
) {
410 struct _DriFreeSlabManager
*fMan
= header
->slabPool
->fMan
;
412 for (list
= header
->freeSlabs
.next
, next
= list
->next
;
413 list
!= &header
->freeSlabs
;
414 list
= next
, next
= list
->next
) {
416 slab
= DRMLISTENTRY(struct _DriSlab
, list
, head
);
418 DRMLISTDELINIT(list
);
419 driSetKernelBOFree(fMan
, slab
->kbo
);
427 driSlabCheckFreeLocked(struct _DriSlabSizeHeader
*header
, int wait
)
429 drmMMListHead
*list
, *prev
, *first
;
430 struct _DriSlabBuffer
*buf
;
431 struct _DriSlab
*slab
;
432 int firstWasSignaled
= 1;
438 * Rerun the freeing test if the youngest tested buffer
439 * was signaled, since there might be more idle buffers
443 while (firstWasSignaled
) {
444 firstWasSignaled
= 0;
446 first
= header
->delayedBuffers
.next
;
448 /* Only examine the oldest 1/3 of delayed buffers:
450 if (header
->numDelayed
> 3) {
451 for (i
= 0; i
< header
->numDelayed
; i
+= 3) {
456 for (list
= first
, prev
= list
->prev
;
457 list
!= &header
->delayedBuffers
;
458 list
= prev
, prev
= list
->prev
) {
459 buf
= DRMLISTENTRY(struct _DriSlabBuffer
, list
, head
);
464 ret
= driFenceFinish(buf
->fence
, buf
->fenceType
, 0);
470 signaled
= driFenceSignaled(buf
->fence
, buf
->fenceType
);
474 firstWasSignaled
= 1;
475 driFenceUnReference(&buf
->fence
);
476 header
->numDelayed
--;
477 driSlabFreeBufferLocked(buf
);
479 } else if (driFenceSignaledCached(buf
->fence
, buf
->fenceType
)) {
480 driFenceUnReference(&buf
->fence
);
481 header
->numDelayed
--;
482 driSlabFreeBufferLocked(buf
);
489 static struct _DriSlabBuffer
*
490 driSlabAllocBuffer(struct _DriSlabSizeHeader
*header
)
492 static struct _DriSlabBuffer
*buf
;
493 struct _DriSlab
*slab
;
495 int count
= DRI_SLABPOOL_ALLOC_RETRIES
;
497 pipe_mutex_lock(header
->mutex
);
498 while(header
->slabs
.next
== &header
->slabs
&& count
> 0) {
499 driSlabCheckFreeLocked(header
, 0);
500 if (header
->slabs
.next
!= &header
->slabs
)
503 pipe_mutex_unlock(header
->mutex
);
504 if (count
!= DRI_SLABPOOL_ALLOC_RETRIES
)
506 pipe_mutex_lock(header
->mutex
);
507 (void) driAllocSlab(header
);
511 list
= header
->slabs
.next
;
512 if (list
== &header
->slabs
) {
513 pipe_mutex_unlock(header
->mutex
);
516 slab
= DRMLISTENTRY(struct _DriSlab
, list
, head
);
517 if (--slab
->numFree
== 0)
518 DRMLISTDELINIT(list
);
520 list
= slab
->freeBuffers
.next
;
521 DRMLISTDELINIT(list
);
523 pipe_mutex_unlock(header
->mutex
);
524 buf
= DRMLISTENTRY(struct _DriSlabBuffer
, list
, head
);
529 pool_create(struct _DriBufferPool
*driPool
, unsigned long size
,
530 uint64_t flags
, unsigned hint
, unsigned alignment
)
532 struct _DriSlabPool
*pool
= (struct _DriSlabPool
*) driPool
->data
;
533 struct _DriSlabSizeHeader
*header
;
534 struct _DriSlabBuffer
*buf
;
540 * FIXME: Check for compatibility.
543 header
= pool
->headers
;
544 for (i
=0; i
<pool
->numBuckets
; ++i
) {
545 if (header
->bufSize
>= size
)
550 if (i
< pool
->numBuckets
)
551 return driSlabAllocBuffer(header
);
555 * Fall back to allocate a buffer object directly from DRM.
556 * and wrap it in a driBO structure.
560 buf
= calloc(1, sizeof(*buf
));
565 buf
->bo
= calloc(1, sizeof(*buf
->bo
));
570 if ((alignment
< pool
->pageSize
) && (pool
->pageSize
% alignment
))
572 if ((alignment
> pool
->pageSize
) && (alignment
% pool
->pageSize
))
576 ret
= drmBOCreate(pool
->fd
, size
, alignment
/ pool
->pageSize
, NULL
,
577 flags
, hint
, buf
->bo
);
581 ret
= drmBOMap(pool
->fd
, buf
->bo
, DRM_BO_FLAG_READ
| DRM_BO_FLAG_WRITE
,
586 ret
= drmBOUnmap(pool
->fd
, buf
->bo
);
592 drmBOUnreference(pool
->fd
, buf
->bo
);
601 pool_destroy(struct _DriBufferPool
*driPool
, void *private)
603 struct _DriSlabBuffer
*buf
=
604 (struct _DriSlabBuffer
*) private;
605 struct _DriSlab
*slab
;
606 struct _DriSlabSizeHeader
*header
;
608 if (!buf
->isSlabBuffer
) {
609 struct _DriSlabPool
*pool
= (struct _DriSlabPool
*) driPool
->data
;
612 ret
= drmBOUnreference(pool
->fd
, buf
->bo
);
619 header
= slab
->header
;
621 pipe_mutex_lock(header
->mutex
);
625 if (buf
->fence
&& !driFenceSignaledCached(buf
->fence
, buf
->fenceType
)) {
626 DRMLISTADDTAIL(&buf
->head
, &header
->delayedBuffers
);
627 header
->numDelayed
++;
630 driFenceUnReference(&buf
->fence
);
631 driSlabFreeBufferLocked(buf
);
634 pipe_mutex_unlock(header
->mutex
);
639 pool_waitIdle(struct _DriBufferPool
*driPool
, void *private,
640 pipe_mutex
*mutex
, int lazy
)
642 struct _DriSlabBuffer
*buf
= (struct _DriSlabBuffer
*) private;
645 pipe_condvar_wait(buf
->event
, *mutex
);
650 driFenceFinish(buf
->fence
, buf
->fenceType
, lazy
);
651 driFenceUnReference(&buf
->fence
);
657 pool_map(struct _DriBufferPool
*pool
, void *private, unsigned flags
,
658 int hint
, pipe_mutex
*mutex
, void **virtual)
660 struct _DriSlabBuffer
*buf
= (struct _DriSlabBuffer
*) private;
663 if (buf
->isSlabBuffer
)
664 busy
= buf
->unFenced
|| (buf
->fence
&& !driFenceSignaledCached(buf
->fence
, buf
->fenceType
));
666 busy
= buf
->fence
&& !driFenceSignaled(buf
->fence
, buf
->fenceType
);
670 if (hint
& DRM_BO_HINT_DONT_BLOCK
)
673 (void) pool_waitIdle(pool
, private, mutex
, 0);
678 *virtual = (buf
->isSlabBuffer
) ?
679 (void *) ((uint8_t *) buf
->parent
->kbo
->virtual + buf
->start
) :
680 (void *) buf
->bo
->virtual;
686 pool_unmap(struct _DriBufferPool
*pool
, void *private)
688 struct _DriSlabBuffer
*buf
= (struct _DriSlabBuffer
*) private;
691 if (buf
->mapCount
== 0 && buf
->isSlabBuffer
)
692 pipe_condvar_broadcast(buf
->event
);
698 pool_offset(struct _DriBufferPool
*pool
, void *private)
700 struct _DriSlabBuffer
*buf
= (struct _DriSlabBuffer
*) private;
701 struct _DriSlab
*slab
;
702 struct _DriSlabSizeHeader
*header
;
704 if (!buf
->isSlabBuffer
) {
705 assert(buf
->bo
->proposedFlags
& DRM_BO_FLAG_NO_MOVE
);
706 return buf
->bo
->offset
;
710 header
= slab
->header
;
713 assert(header
->slabPool
->proposedFlags
& DRM_BO_FLAG_NO_MOVE
);
714 return slab
->kbo
->bo
.offset
+ buf
->start
;
718 pool_poolOffset(struct _DriBufferPool
*pool
, void *private)
720 struct _DriSlabBuffer
*buf
= (struct _DriSlabBuffer
*) private;
726 pool_flags(struct _DriBufferPool
*pool
, void *private)
728 struct _DriSlabBuffer
*buf
= (struct _DriSlabBuffer
*) private;
730 if (!buf
->isSlabBuffer
)
731 return buf
->bo
->flags
;
733 return buf
->parent
->kbo
->bo
.flags
;
737 pool_size(struct _DriBufferPool
*pool
, void *private)
739 struct _DriSlabBuffer
*buf
= (struct _DriSlabBuffer
*) private;
740 if (!buf
->isSlabBuffer
)
741 return buf
->bo
->size
;
743 return buf
->parent
->header
->bufSize
;
747 pool_fence(struct _DriBufferPool
*pool
, void *private,
748 struct _DriFenceObject
*fence
)
750 struct _DriSlabBuffer
*buf
= (struct _DriSlabBuffer
*) private;
754 driFenceUnReference(&buf
->fence
);
756 buf
->fence
= driFenceReference(fence
);
757 bo
= (buf
->isSlabBuffer
) ?
758 &buf
->parent
->kbo
->bo
:
760 buf
->fenceType
= bo
->fenceFlags
;
763 pipe_condvar_broadcast(buf
->event
);
769 pool_kernel(struct _DriBufferPool
*pool
, void *private)
771 struct _DriSlabBuffer
*buf
= (struct _DriSlabBuffer
*) private;
773 return (buf
->isSlabBuffer
) ? &buf
->parent
->kbo
->bo
: buf
->bo
;
777 pool_validate(struct _DriBufferPool
*pool
, void *private,
780 struct _DriSlabBuffer
*buf
= (struct _DriSlabBuffer
*) private;
782 if (!buf
->isSlabBuffer
)
785 while(buf
->mapCount
!= 0)
786 pipe_condvar_wait(buf
->event
, *mutex
);
793 struct _DriFreeSlabManager
*
794 driInitFreeSlabManager(uint32_t checkIntervalMsec
, uint32_t slabTimeoutMsec
)
796 struct _DriFreeSlabManager
*tmp
;
798 tmp
= calloc(1, sizeof(*tmp
));
802 pipe_mutex_init(tmp
->mutex
);
803 pipe_mutex_lock(tmp
->mutex
);
804 tmp
->slabTimeout
.tv_usec
= slabTimeoutMsec
*1000;
805 tmp
->slabTimeout
.tv_sec
= tmp
->slabTimeout
.tv_usec
/ 1000000;
806 tmp
->slabTimeout
.tv_usec
-= tmp
->slabTimeout
.tv_sec
*1000000;
808 tmp
->checkInterval
.tv_usec
= checkIntervalMsec
*1000;
809 tmp
->checkInterval
.tv_sec
= tmp
->checkInterval
.tv_usec
/ 1000000;
810 tmp
->checkInterval
.tv_usec
-= tmp
->checkInterval
.tv_sec
*1000000;
812 gettimeofday(&tmp
->nextCheck
, NULL
);
813 driTimeAdd(&tmp
->nextCheck
, &tmp
->checkInterval
);
814 DRMINITLISTHEAD(&tmp
->timeoutList
);
815 DRMINITLISTHEAD(&tmp
->unCached
);
816 DRMINITLISTHEAD(&tmp
->cached
);
817 pipe_mutex_unlock(tmp
->mutex
);
823 driFinishFreeSlabManager(struct _DriFreeSlabManager
*fMan
)
827 time
= fMan
->nextCheck
;
828 driTimeAdd(&time
, &fMan
->checkInterval
);
830 pipe_mutex_lock(fMan
->mutex
);
831 driFreeTimeoutKBOsLocked(fMan
, &time
);
832 pipe_mutex_unlock(fMan
->mutex
);
834 assert(fMan
->timeoutList
.next
== &fMan
->timeoutList
);
835 assert(fMan
->unCached
.next
== &fMan
->unCached
);
836 assert(fMan
->cached
.next
== &fMan
->cached
);
842 driInitSizeHeader(struct _DriSlabPool
*pool
, uint32_t size
,
843 struct _DriSlabSizeHeader
*header
)
845 pipe_mutex_init(header
->mutex
);
846 pipe_mutex_lock(header
->mutex
);
848 DRMINITLISTHEAD(&header
->slabs
);
849 DRMINITLISTHEAD(&header
->freeSlabs
);
850 DRMINITLISTHEAD(&header
->delayedBuffers
);
852 header
->numDelayed
= 0;
853 header
->slabPool
= pool
;
854 header
->bufSize
= size
;
856 pipe_mutex_unlock(header
->mutex
);
860 driFinishSizeHeader(struct _DriSlabSizeHeader
*header
)
862 drmMMListHead
*list
, *next
;
863 struct _DriSlabBuffer
*buf
;
865 pipe_mutex_lock(header
->mutex
);
866 for (list
= header
->delayedBuffers
.next
, next
= list
->next
;
867 list
!= &header
->delayedBuffers
;
868 list
= next
, next
= list
->next
) {
870 buf
= DRMLISTENTRY(struct _DriSlabBuffer
, list
, head
);
872 (void) driFenceFinish(buf
->fence
, buf
->fenceType
, 0);
873 driFenceUnReference(&buf
->fence
);
875 header
->numDelayed
--;
876 driSlabFreeBufferLocked(buf
);
878 pipe_mutex_unlock(header
->mutex
);
882 pool_takedown(struct _DriBufferPool
*driPool
)
884 struct _DriSlabPool
*pool
= driPool
->data
;
887 for (i
=0; i
<pool
->numBuckets
; ++i
) {
888 driFinishSizeHeader(&pool
->headers
[i
]);
892 free(pool
->bucketSizes
);
897 struct _DriBufferPool
*
898 driSlabPoolInit(int fd
, uint64_t flags
,
900 uint32_t smallestSize
,
902 uint32_t desiredNumBuffers
,
903 uint32_t maxSlabSize
,
904 uint32_t pageAlignment
,
905 struct _DriFreeSlabManager
*fMan
)
907 struct _DriBufferPool
*driPool
;
908 struct _DriSlabPool
*pool
;
911 driPool
= calloc(1, sizeof(*driPool
));
915 pool
= calloc(1, sizeof(*pool
));
919 pool
->bucketSizes
= calloc(numSizes
, sizeof(*pool
->bucketSizes
));
920 if (!pool
->bucketSizes
)
923 pool
->headers
= calloc(numSizes
, sizeof(*pool
->headers
));
928 pool
->proposedFlags
= flags
;
929 pool
->validMask
= validMask
;
930 pool
->numBuckets
= numSizes
;
931 pool
->pageSize
= getpagesize();
933 pool
->pageAlignment
= pageAlignment
;
934 pool
->maxSlabSize
= maxSlabSize
;
935 pool
->desiredNumBuffers
= desiredNumBuffers
;
937 for (i
=0; i
<pool
->numBuckets
; ++i
) {
938 pool
->bucketSizes
[i
] = (smallestSize
<< i
);
939 driInitSizeHeader(pool
, pool
->bucketSizes
[i
],
943 driPool
->data
= (void *) pool
;
944 driPool
->map
= &pool_map
;
945 driPool
->unmap
= &pool_unmap
;
946 driPool
->destroy
= &pool_destroy
;
947 driPool
->offset
= &pool_offset
;
948 driPool
->poolOffset
= &pool_poolOffset
;
949 driPool
->flags
= &pool_flags
;
950 driPool
->size
= &pool_size
;
951 driPool
->create
= &pool_create
;
952 driPool
->fence
= &pool_fence
;
953 driPool
->kernel
= &pool_kernel
;
954 driPool
->validate
= &pool_validate
;
955 driPool
->waitIdle
= &pool_waitIdle
;
956 driPool
->takeDown
= &pool_takedown
;
961 free(pool
->bucketSizes
);