1 /**************************************************************************
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 /* Originally a fake version of the buffer manager so that we can
29 * prototype the changes in a driver fairly quickly, has been fleshed
30 * out to a fully functional interim solution.
32 * Basically wraps the old style memory management in the new
33 * programming interface, but is more expressive and avoids many of
34 * the bugs in the old texture manager.
37 #include "dri_bufmgr.h"
40 #include "simple_list.h"
/* Driver-private buffer flags carved out of the DRM_BO private range:
 * BM_NO_BACKING_STORE - buffer lives only in card memory, no system copy.
 * BM_NO_FENCE_SUBDATA - subdata uploads need not wait on fences.
 */
#define BM_NO_BACKING_STORE   DRM_BO_FLAG_MEM_PRIV0
#define BM_NO_FENCE_SUBDATA   DRM_BO_FLAG_MEM_PRIV1
51 /* Wrapper around mm.c's mem_block, which understands that you must
52 * wait for fences to expire before memory can be freed. This is
53 * specific to our use of memcpy for uploads - an upload that was
54 * processed through the command queue wouldn't need to care about
58 struct block
*next
, *prev
;
59 struct mem_block
*mem
; /* BM_MEM_AGP */
61 unsigned referenced
:1;
62 unsigned on_hardware
:1;
65 unsigned fence
; /* BM_MEM_AGP, Split to read_fence, write_fence */
71 typedef struct _bufmgr_fake
{
74 _glthread_Mutex mutex
; /**< for thread safety */
76 unsigned long low_offset
;
80 struct mem_block
*heap
;
81 struct block lru
; /* only allocated, non-fence-pending blocks here */
83 unsigned buf_nr
; /* for generating ids */
85 struct block referenced
; /* after bmBufferOffset */
86 struct block on_hardware
; /* after bmValidateBuffers */
87 struct block fenced
; /* after bmFenceBuffers (mi_flush, emit irq, write dword) */
88 /* then to bufmgr->lru or free() */
90 unsigned int last_fence
;
93 unsigned need_fence
:1;
97 * Driver callback to emit a fence, returning the cookie.
99 * Currently, this also requires that a write flush be emitted before
100 * emitting the fence, but this should change.
102 unsigned int (*fence_emit
)(void *private);
103 /** Driver callback to wait for a fence cookie to have passed. */
104 int (*fence_wait
)(void *private, unsigned int fence_cookie
);
105 /** Driver-supplied argument to driver callbacks */
109 typedef struct _dri_bo_fake
{
112 unsigned id
; /* debug only */
116 unsigned int refcount
;
117 /* Flags may consist of any of the DRM_BO flags, plus
118 * DRM_BO_NO_BACKING_STORE and BM_NO_FENCE_SUBDATA, which are the first two
119 * driver private flags.
122 unsigned int alignment
;
127 void (*invalidate_cb
)(dri_bufmgr
*bufmgr
, void * );
128 void *invalidate_ptr
;
131 typedef struct _dri_fence_fake
{
135 unsigned int refcount
;
136 unsigned int fence_cookie
;
140 static int clear_fenced(dri_bufmgr_fake
*bufmgr_fake
,
141 unsigned int fence_cookie
);
143 #define MAXFENCE 0x7fffffff
145 static GLboolean
FENCE_LTE( unsigned a
, unsigned b
)
150 if (a
< b
&& b
- a
< (1<<24))
153 if (a
> b
&& MAXFENCE
- a
+ b
< (1<<24))
160 _fence_emit_internal(dri_bufmgr_fake
*bufmgr_fake
)
162 bufmgr_fake
->last_fence
= bufmgr_fake
->fence_emit(bufmgr_fake
->driver_priv
);
163 return bufmgr_fake
->last_fence
;
167 _fence_wait_internal(dri_bufmgr_fake
*bufmgr_fake
, unsigned int cookie
)
171 ret
= bufmgr_fake
->fence_wait(bufmgr_fake
->driver_priv
, cookie
);
173 _mesa_printf("%s:%d: Error %d waiting for fence.\n",
177 clear_fenced(bufmgr_fake
, cookie
);
181 _fence_test(dri_bufmgr_fake
*bufmgr_fake
, unsigned fence
)
183 /* Slight problem with wrap-around:
185 return fence
== 0 || FENCE_LTE(fence
, bufmgr_fake
->last_fence
);
189 * Allocate a memory manager block for the buffer.
192 alloc_block(dri_bo
*bo
)
194 dri_bo_fake
*bo_fake
= (dri_bo_fake
*)bo
;
195 dri_bufmgr_fake
*bufmgr_fake
= (dri_bufmgr_fake
*)bo
->bufmgr
;
196 struct block
*block
= (struct block
*)calloc(sizeof *block
, 1);
197 unsigned int align_log2
= ffs(bo_fake
->alignment
);
203 sz
= (bo
->size
+ bo_fake
->alignment
- 1) & ~(bo_fake
->alignment
- 1);
205 block
->mem
= mmAllocMem(bufmgr_fake
->heap
, sz
, align_log2
, 0);
211 make_empty_list(block
);
213 /* Insert at head or at tail???
215 insert_at_tail(&bufmgr_fake
->lru
, block
);
217 block
->virtual = bufmgr_fake
->virtual + block
->mem
->ofs
;
220 bo_fake
->block
= block
;
225 /* Release the card storage associated with buf:
227 static void free_block(dri_bufmgr_fake
*bufmgr_fake
, struct block
*block
)
229 DBG("free block %p\n", block
);
234 if (block
->referenced
) {
235 _mesa_printf("tried to free block on referenced list\n");
238 else if (block
->on_hardware
) {
241 else if (block
->fenced
) {
245 DBG(" - free immediately\n");
246 remove_from_list(block
);
248 mmFreeMem(block
->mem
);
254 alloc_backing_store(dri_bo
*bo
)
256 dri_bo_fake
*bo_fake
= (dri_bo_fake
*)bo
;
257 assert(!bo_fake
->backing_store
);
258 assert(!(bo_fake
->flags
& (DRM_BO_FLAG_NO_EVICT
|BM_NO_BACKING_STORE
)));
260 bo_fake
->backing_store
= ALIGN_MALLOC(bo
->size
, 64);
264 free_backing_store(dri_bo
*bo
)
266 dri_bo_fake
*bo_fake
= (dri_bo_fake
*)bo
;
267 assert(!(bo_fake
->flags
& (DRM_BO_FLAG_NO_EVICT
|BM_NO_BACKING_STORE
)));
269 if (bo_fake
->backing_store
) {
270 ALIGN_FREE(bo_fake
->backing_store
);
271 bo_fake
->backing_store
= NULL
;
276 set_dirty(dri_bo
*bo
)
278 dri_bufmgr_fake
*bufmgr_fake
= (dri_bufmgr_fake
*)bo
->bufmgr
;
279 dri_bo_fake
*bo_fake
= (dri_bo_fake
*)bo
;
281 if (bo_fake
->flags
& BM_NO_BACKING_STORE
)
282 bo_fake
->invalidate_cb(&bufmgr_fake
->bufmgr
, bo_fake
->invalidate_ptr
);
284 assert(!(bo_fake
->flags
& DRM_BO_FLAG_NO_EVICT
));
286 DBG("set_dirty - buf %d\n", bo_fake
->id
);
291 evict_lru(dri_bufmgr_fake
*bufmgr_fake
, GLuint max_fence
)
293 struct block
*block
, *tmp
;
295 DBG("%s\n", __FUNCTION__
);
297 foreach_s(block
, tmp
, &bufmgr_fake
->lru
) {
298 dri_bo_fake
*bo_fake
= (dri_bo_fake
*)block
->bo
;
300 if (bo_fake
!= NULL
&& (bo_fake
->flags
& BM_NO_FENCE_SUBDATA
))
303 if (block
->fence
&& max_fence
&& !FENCE_LTE(block
->fence
, max_fence
))
306 set_dirty(&bo_fake
->bo
);
307 bo_fake
->block
= NULL
;
309 free_block(bufmgr_fake
, block
);
/* Reverse-order counterpart of simple_list's foreach_s: walks the list
 * from the tail, safe against deletion of the current node.
 */
#define foreach_s_rev(ptr, t, list)   \
   for(ptr=(list)->prev,t=(ptr)->prev; list != ptr; ptr=t, t=(t)->prev)
320 evict_mru(dri_bufmgr_fake
*bufmgr_fake
)
322 struct block
*block
, *tmp
;
324 DBG("%s\n", __FUNCTION__
);
326 foreach_s_rev(block
, tmp
, &bufmgr_fake
->lru
) {
327 dri_bo_fake
*bo_fake
= (dri_bo_fake
*)block
->bo
;
329 if (bo_fake
&& (bo_fake
->flags
& BM_NO_FENCE_SUBDATA
))
332 set_dirty(&bo_fake
->bo
);
333 bo_fake
->block
= NULL
;
335 free_block(bufmgr_fake
, block
);
343 * Removes all objects from the fenced list older than the given fence.
345 static int clear_fenced(dri_bufmgr_fake
*bufmgr_fake
,
346 unsigned int fence_cookie
)
348 struct block
*block
, *tmp
;
351 foreach_s(block
, tmp
, &bufmgr_fake
->fenced
) {
352 assert(block
->fenced
);
354 if (_fence_test(bufmgr_fake
, block
->fence
)) {
359 DBG("delayed free: offset %x sz %x\n",
360 block
->mem
->ofs
, block
->mem
->size
);
361 remove_from_list(block
);
362 mmFreeMem(block
->mem
);
366 DBG("return to lru: offset %x sz %x\n",
367 block
->mem
->ofs
, block
->mem
->size
);
368 move_to_tail(&bufmgr_fake
->lru
, block
);
374 /* Blocks are ordered by fence, so if one fails, all from
375 * here will fail also:
381 /* Also check the referenced list:
383 foreach_s(block
, tmp
, &bufmgr_fake
->referenced
) {
384 if (block
->fenced
&& _fence_test(bufmgr_fake
, block
->fence
)) {
389 DBG("%s: %d\n", __FUNCTION__
, ret
);
393 static void fence_blocks(dri_bufmgr_fake
*bufmgr_fake
, unsigned fence
)
395 struct block
*block
, *tmp
;
397 foreach_s (block
, tmp
, &bufmgr_fake
->on_hardware
) {
398 DBG("Fence block %p (sz 0x%x buf %p) with fence %d\n", block
,
399 block
->mem
->size
, block
->bo
, fence
);
400 block
->fence
= fence
;
402 block
->on_hardware
= 0;
405 /* Move to tail of pending list here
407 move_to_tail(&bufmgr_fake
->fenced
, block
);
410 /* Also check the referenced list:
412 foreach_s (block
, tmp
, &bufmgr_fake
->referenced
) {
413 if (block
->on_hardware
) {
414 DBG("Fence block %p (sz 0x%x buf %p) with fence %d\n", block
,
415 block
->mem
->size
, block
->bo
, fence
);
417 block
->fence
= fence
;
418 block
->on_hardware
= 0;
423 assert(is_empty_list(&bufmgr_fake
->on_hardware
));
426 static GLboolean
evict_and_alloc_block(dri_bo
*bo
)
428 dri_bufmgr_fake
*bufmgr_fake
= (dri_bufmgr_fake
*)bo
->bufmgr
;
429 dri_bo_fake
*bo_fake
= (dri_bo_fake
*)bo
;
431 assert(bo_fake
->block
== NULL
);
433 /* Search for already free memory:
438 /* If we're not thrashing, allow lru eviction to dig deeper into
439 * recently used textures. We'll probably be thrashing soon:
441 if (!bufmgr_fake
->thrashing
) {
442 while (evict_lru(bufmgr_fake
, 0))
447 /* Keep thrashing counter alive?
449 if (bufmgr_fake
->thrashing
)
450 bufmgr_fake
->thrashing
= 20;
452 /* Wait on any already pending fences - here we are waiting for any
453 * freed memory that has been submitted to hardware and fenced to
456 while (!is_empty_list(&bufmgr_fake
->fenced
)) {
457 GLuint fence
= bufmgr_fake
->fenced
.next
->fence
;
458 _fence_wait_internal(bufmgr_fake
, fence
);
464 if (!is_empty_list(&bufmgr_fake
->on_hardware
)) {
465 while (!is_empty_list(&bufmgr_fake
->fenced
)) {
466 GLuint fence
= bufmgr_fake
->fenced
.next
->fence
;
467 _fence_wait_internal(bufmgr_fake
, fence
);
470 if (!bufmgr_fake
->thrashing
) {
473 bufmgr_fake
->thrashing
= 20;
479 while (evict_mru(bufmgr_fake
))
483 DBG("%s 0x%x bytes failed\n", __FUNCTION__
, bo
->size
);
485 assert(is_empty_list(&bufmgr_fake
->on_hardware
));
486 assert(is_empty_list(&bufmgr_fake
->fenced
));
491 /***********************************************************************
496 * Wait for hardware idle by emitting a fence and waiting for it.
499 dri_bufmgr_fake_wait_idle(dri_bufmgr_fake
*bufmgr_fake
)
503 cookie
= bufmgr_fake
->fence_emit(bufmgr_fake
->driver_priv
);
504 _fence_wait_internal(bufmgr_fake
, cookie
);
507 /* Specifically ignore texture memory sharing.
508 * -- just evict everything
509 * -- and wait for idle
512 dri_bufmgr_fake_contended_lock_take(dri_bufmgr
*bufmgr
)
514 dri_bufmgr_fake
*bufmgr_fake
= (dri_bufmgr_fake
*)bufmgr
;
516 _glthread_LOCK_MUTEX(bufmgr_fake
->mutex
);
518 struct block
*block
, *tmp
;
520 assert(is_empty_list(&bufmgr_fake
->referenced
));
522 bufmgr_fake
->need_fence
= 1;
523 bufmgr_fake
->fail
= 0;
525 /* Wait for hardware idle. We don't know where acceleration has been
526 * happening, so we'll need to wait anyway before letting anything get
527 * put on the card again.
529 dri_bufmgr_fake_wait_idle(bufmgr_fake
);
531 assert(is_empty_list(&bufmgr_fake
->fenced
));
532 assert(is_empty_list(&bufmgr_fake
->on_hardware
));
534 foreach_s(block
, tmp
, &bufmgr_fake
->lru
) {
535 assert(_fence_test(bufmgr_fake
, block
->fence
));
536 set_dirty(block
->bo
);
539 _glthread_UNLOCK_MUTEX(bufmgr_fake
->mutex
);
543 dri_fake_alloc(dri_bufmgr
*bufmgr
, const char *name
,
544 unsigned long size
, unsigned int alignment
, unsigned int flags
,
547 dri_bufmgr_fake
*bufmgr_fake
;
548 dri_bo_fake
*bo_fake
;
550 bufmgr_fake
= (dri_bufmgr_fake
*)bufmgr
;
552 bo_fake
= calloc(1, sizeof(*bo_fake
));
556 bo_fake
->bo
.size
= size
;
557 bo_fake
->bo
.offset
= -1;
558 bo_fake
->bo
.virtual = NULL
;
559 bo_fake
->bo
.bufmgr
= bufmgr
;
560 bo_fake
->refcount
= 1;
562 /* Alignment must be a power of two */
563 assert((alignment
& (alignment
- 1)) == 0);
566 bo_fake
->alignment
= alignment
;
567 bo_fake
->id
= ++bufmgr_fake
->buf_nr
;
568 bo_fake
->name
= name
;
569 bo_fake
->flags
= flags
;
570 bo_fake
->is_static
= GL_FALSE
;
576 dri_fake_alloc_static(dri_bufmgr
*bufmgr
, const char *name
,
577 unsigned long offset
, unsigned long size
, void *virtual,
578 unsigned int flags
, unsigned int hint
)
580 dri_bufmgr_fake
*bufmgr_fake
;
581 dri_bo_fake
*bo_fake
;
583 bufmgr_fake
= (dri_bufmgr_fake
*)bufmgr
;
585 bo_fake
= calloc(1, sizeof(*bo_fake
));
589 bo_fake
->bo
.size
= size
;
590 bo_fake
->bo
.offset
= offset
;
591 bo_fake
->bo
.virtual = virtual;
592 bo_fake
->bo
.bufmgr
= bufmgr
;
593 bo_fake
->refcount
= 1;
594 bo_fake
->name
= name
;
595 bo_fake
->flags
= flags
;
596 bo_fake
->is_static
= GL_TRUE
;
602 dri_fake_bo_reference(dri_bo
*bo
)
604 dri_bufmgr_fake
*bufmgr_fake
= (dri_bufmgr_fake
*)bo
->bufmgr
;
605 dri_bo_fake
*bo_fake
= (dri_bo_fake
*)bo
;
607 _glthread_LOCK_MUTEX(bufmgr_fake
->mutex
);
609 _glthread_UNLOCK_MUTEX(bufmgr_fake
->mutex
);
613 dri_fake_bo_unreference(dri_bo
*bo
)
615 dri_bufmgr_fake
*bufmgr_fake
= (dri_bufmgr_fake
*)bo
->bufmgr
;
616 dri_bo_fake
*bo_fake
= (dri_bo_fake
*)bo
;
621 _glthread_LOCK_MUTEX(bufmgr_fake
->mutex
);
622 if (--bo_fake
->refcount
== 0) {
623 _glthread_UNLOCK_MUTEX(bufmgr_fake
->mutex
);
624 /* No remaining references, so free it */
626 free_block(bufmgr_fake
, bo_fake
->block
);
627 free_backing_store(bo
);
631 _glthread_UNLOCK_MUTEX(bufmgr_fake
->mutex
);
635 * Map a buffer into bo->virtual, allocating either card memory space (If
636 * BM_NO_BACKING_STORE or DRM_BO_FLAG_NO_EVICT) or backing store, as necessary.
639 dri_fake_bo_map(dri_bo
*bo
, GLboolean write_enable
)
641 dri_bufmgr_fake
*bufmgr_fake
= (dri_bufmgr_fake
*)bo
->bufmgr
;
642 dri_bo_fake
*bo_fake
= (dri_bo_fake
*)bo
;
644 /* Static buffers are always mapped. */
645 if (bo_fake
->is_static
)
648 _glthread_LOCK_MUTEX(bufmgr_fake
->mutex
);
650 DBG("bmMapBuffer %d\n", bo_fake
->id
);
652 if (bo
->virtual != NULL
) {
653 _mesa_printf("%s: already mapped\n", __FUNCTION__
);
656 else if (bo_fake
->flags
& (BM_NO_BACKING_STORE
|DRM_BO_FLAG_NO_EVICT
)) {
658 if (!bo_fake
->block
&& !evict_and_alloc_block(bo
)) {
659 DBG("%s: alloc failed\n", __FUNCTION__
);
660 bufmgr_fake
->fail
= 1;
661 _glthread_UNLOCK_MUTEX(bufmgr_fake
->mutex
);
665 assert(bo_fake
->block
);
668 if (!(bo_fake
->flags
& BM_NO_FENCE_SUBDATA
))
669 dri_bufmgr_fake_wait_idle(bufmgr_fake
);
671 bo
->virtual = bo_fake
->block
->virtual;
678 if (bo_fake
->backing_store
== 0)
679 alloc_backing_store(bo
);
681 bo
->virtual = bo_fake
->backing_store
;
684 _glthread_UNLOCK_MUTEX(bufmgr_fake
->mutex
);
689 dri_fake_bo_unmap(dri_bo
*bo
)
691 dri_bo_fake
*bo_fake
= (dri_bo_fake
*)bo
;
693 /* Static buffers are always mapped. */
694 if (bo_fake
->is_static
)
706 dri_fake_bo_validate(dri_bo
*bo
, unsigned int flags
)
708 dri_bufmgr_fake
*bufmgr_fake
;
709 dri_bo_fake
*bo_fake
= (dri_bo_fake
*)bo
;
711 /* XXX: Sanity-check whether we've already validated this one under
712 * different flags. See drmAddValidateItem().
715 bufmgr_fake
= (dri_bufmgr_fake
*)bo
->bufmgr
;
717 _glthread_LOCK_MUTEX(bufmgr_fake
->mutex
);
719 /* Allocate the card memory */
720 if (!bo_fake
->block
&& !evict_and_alloc_block(bo
)) {
721 bufmgr_fake
->fail
= 1;
722 _glthread_UNLOCK_MUTEX(bufmgr_fake
->mutex
);
726 assert(bo_fake
->block
);
727 assert(bo_fake
->block
->bo
== &bo_fake
->bo
);
729 DBG("Add buf %d (block %p, dirty %d) to referenced list\n",
730 bo_fake
->id
, bo_fake
->block
, bo_fake
->dirty
);
732 move_to_tail(&bufmgr_fake
->referenced
, bo_fake
->block
);
733 bo_fake
->block
->referenced
= 1;
735 bo
->offset
= bo_fake
->block
->mem
->ofs
;
737 /* Upload the buffer contents if necessary */
738 if (bo_fake
->dirty
) {
739 DBG("Upload dirty buf %d (%s) sz %d offset 0x%x\n", bo_fake
->id
,
740 bo_fake
->name
, bo
->size
, block
->mem
->ofs
);
742 assert(!(bo_fake
->flags
&
743 (BM_NO_BACKING_STORE
|DRM_BO_FLAG_NO_EVICT
)));
745 /* Actually, should be able to just wait for a fence on the memory,
746 * which we would be tracking when we free it. Waiting for idle is
747 * a sufficiently large hammer for now.
749 dri_bufmgr_fake_wait_idle(bufmgr_fake
);
751 memcpy(bo_fake
->block
->virtual, bo_fake
->backing_store
, bo
->size
);
753 bo_fake
->block
->referenced
= 0;
754 bo_fake
->block
->on_hardware
= 1;
755 move_to_tail(&bufmgr_fake
->on_hardware
, bo_fake
->block
);
758 bufmgr_fake
->need_fence
= 1;
760 _glthread_UNLOCK_MUTEX(bufmgr_fake
->mutex
);
766 dri_fake_fence_validated(dri_bufmgr
*bufmgr
, const char *name
,
769 dri_fence_fake
*fence_fake
;
770 dri_bufmgr_fake
*bufmgr_fake
= (dri_bufmgr_fake
*)bufmgr
;
773 fence_fake
= malloc(sizeof(*fence_fake
));
777 fence_fake
->refcount
= 1;
778 fence_fake
->name
= name
;
779 fence_fake
->flushed
= flushed
;
780 fence_fake
->fence
.bufmgr
= bufmgr
;
782 _glthread_LOCK_MUTEX(bufmgr_fake
->mutex
);
783 cookie
= _fence_emit_internal(bufmgr_fake
);
784 fence_fake
->fence_cookie
= cookie
;
785 fence_blocks(bufmgr_fake
, cookie
);
786 _glthread_UNLOCK_MUTEX(bufmgr_fake
->mutex
);
788 return &fence_fake
->fence
;
792 dri_fake_fence_reference(dri_fence
*fence
)
794 dri_fence_fake
*fence_fake
= (dri_fence_fake
*)fence
;
795 dri_bufmgr_fake
*bufmgr_fake
= (dri_bufmgr_fake
*)fence
->bufmgr
;
797 _glthread_LOCK_MUTEX(bufmgr_fake
->mutex
);
798 ++fence_fake
->refcount
;
799 _glthread_UNLOCK_MUTEX(bufmgr_fake
->mutex
);
803 dri_fake_fence_unreference(dri_fence
*fence
)
805 dri_fence_fake
*fence_fake
= (dri_fence_fake
*)fence
;
806 dri_bufmgr_fake
*bufmgr_fake
= (dri_bufmgr_fake
*)fence
->bufmgr
;
811 _glthread_LOCK_MUTEX(bufmgr_fake
->mutex
);
812 if (--fence_fake
->refcount
== 0) {
813 _glthread_UNLOCK_MUTEX(bufmgr_fake
->mutex
);
817 _glthread_UNLOCK_MUTEX(bufmgr_fake
->mutex
);
821 dri_fake_fence_wait(dri_fence
*fence
)
823 dri_fence_fake
*fence_fake
= (dri_fence_fake
*)fence
;
824 dri_bufmgr_fake
*bufmgr_fake
= (dri_bufmgr_fake
*)fence
->bufmgr
;
826 _glthread_LOCK_MUTEX(bufmgr_fake
->mutex
);
827 _fence_wait_internal(bufmgr_fake
->driver_priv
, fence_fake
->fence_cookie
);
828 _glthread_UNLOCK_MUTEX(bufmgr_fake
->mutex
);
832 dri_bufmgr_fake_init(unsigned long low_offset
, void *low_virtual
,
834 unsigned int (*fence_emit
)(void *private),
835 int (*fence_wait
)(void *private, unsigned int cookie
),
838 dri_bufmgr_fake
*bufmgr_fake
;
840 bufmgr_fake
= malloc(sizeof(*bufmgr_fake
));
842 /* Initialize allocator */
843 make_empty_list(&bufmgr_fake
->referenced
);
844 make_empty_list(&bufmgr_fake
->fenced
);
845 make_empty_list(&bufmgr_fake
->on_hardware
);
846 make_empty_list(&bufmgr_fake
->lru
);
848 bufmgr_fake
->low_offset
= low_offset
;
849 bufmgr_fake
->virtual = low_virtual
;
850 bufmgr_fake
->size
= size
;
851 bufmgr_fake
->heap
= mmInit(low_offset
, size
);
853 _glthread_INIT_MUTEX(bufmgr_fake
->mutex
);
855 /* Hook in methods */
856 bufmgr_fake
->bufmgr
.bo_alloc
= dri_fake_alloc
;
857 bufmgr_fake
->bufmgr
.bo_alloc_static
= dri_fake_alloc_static
;
858 bufmgr_fake
->bufmgr
.bo_reference
= dri_fake_bo_reference
;
859 bufmgr_fake
->bufmgr
.bo_unreference
= dri_fake_bo_unreference
;
860 bufmgr_fake
->bufmgr
.bo_map
= dri_fake_bo_map
;
861 bufmgr_fake
->bufmgr
.bo_unmap
= dri_fake_bo_unmap
;
862 bufmgr_fake
->bufmgr
.bo_validate
= dri_fake_bo_validate
;
863 bufmgr_fake
->bufmgr
.fence_validated
= dri_fake_fence_validated
;
864 bufmgr_fake
->bufmgr
.fence_wait
= dri_fake_fence_wait
;
865 bufmgr_fake
->bufmgr
.fence_reference
= dri_fake_fence_reference
;
866 bufmgr_fake
->bufmgr
.fence_unreference
= dri_fake_fence_unreference
;
868 bufmgr_fake
->fence_emit
= fence_emit
;
869 bufmgr_fake
->fence_wait
= fence_wait
;
870 bufmgr_fake
->driver_priv
= driver_priv
;
872 return &bufmgr_fake
->bufmgr
;