/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/* Originally a fake version of the buffer manager so that we can
 * prototype the changes in a driver fairly quickly, has been fleshed
 * out to a fully functional interim solution.
 *
 * Basically wraps the old style memory management in the new
 * programming interface, but is more expressive and avoids many of
 * the bugs in the old texture manager.
 */
#include "intel_context.h"
#include "intel_ioctl.h"
#include "intel_batchbuffer.h"

#include "simple_list.h"

#define BM_NO_BACKING_STORE   0x2000
#define BM_NO_FENCE_SUBDATA   0x4000
static int check_fenced( struct intel_context *intel );

static int nr_attach = 0;
/* Wrapper around mm.c's mem_block, which understands that you must
 * wait for fences to expire before memory can be freed.  This is
 * specific to our use of memcpy for uploads - an upload that was
 * processed through the command queue wouldn't need to care about
 * fences.
 */
struct block {
   struct block *next, *prev;
   struct pool *pool;           /* BM_MEM_AGP */
   struct mem_block *mem;       /* BM_MEM_AGP */

   unsigned referenced:1;
   unsigned on_hardware:1;
   unsigned fenced:1;

   unsigned fence;              /* BM_MEM_AGP, Split to read_fence, write_fence */

   struct buffer *buf;
   void *virtual;
};
struct buffer {
   unsigned id;                 /* debug only */
   const char *name;
   unsigned size;
   unsigned mapped:1;
   unsigned dirty:1;
   unsigned alignment:13;
   unsigned flags;
   struct block *block;
   void *backing_store;
   void (*invalidate_cb)( struct intel_context *, void * );
   void *invalidate_ptr;
};
struct pool {
   unsigned size;
   unsigned low_offset;
   struct buffer *static_buffer;
   unsigned flags;
   struct mem_block *heap;
   void *virtual;
   struct block lru;            /* only allocated, non-fence-pending blocks here */
};
struct bufmgr {
   _glthread_Mutex mutex;       /**< for thread safety */
   struct pool pool[BM_POOL_MAX];
   unsigned nr_pools;

   unsigned buf_nr;             /* for generating ids */

   struct block referenced;     /* after bmBufferOffset */
   struct block on_hardware;    /* after bmValidateBuffers */
   struct block fenced;         /* after bmFenceBuffers (mi_flush, emit irq, write dword) */
                                /* then to pool->lru or free() */

   unsigned ctxId;
   unsigned last_fence;
   unsigned free_on_hardware;

   unsigned fail:1;
   unsigned need_fence:1;
};
#define MAXFENCE 0x7fffffff

static GLboolean FENCE_LTE( unsigned a, unsigned b )
{
   if (a == b)
      return GL_TRUE;

   if (a < b && b - a < (1<<24))
      return GL_TRUE;

   if (a > b && MAXFENCE - a + b < (1<<24))
      return GL_TRUE;

   return GL_FALSE;
}
int bmTestFence( struct intel_context *intel, unsigned fence )
{
   /* Slight problem with wrap-around:
    */
   return fence == 0 || FENCE_LTE(fence, intel->sarea->last_dispatch);
}
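
/* Illustrative sketch (fence values made up) of how the wrap-safe
 * comparison above behaves; it relies only on FENCE_LTE() and MAXFENCE
 * as defined above and is not called by the manager itself.
 */
static void example_fence_wraparound( void )
{
   /* Ordinary case: 100 was emitted before 200. */
   assert(FENCE_LTE(100, 200));

   /* Wrapped case: the counter passed MAXFENCE and restarted near zero,
    * so 5 counts as "after" MAXFENCE - 10 even though it is numerically
    * smaller.
    */
   assert(FENCE_LTE(MAXFENCE - 10, 5));

   /* Distances of 2^24 or more are treated as "not <=", which bounds how
    * far apart two live fence values may legitimately be.
    */
   assert(!FENCE_LTE(200, 100));
}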
#define LOCK(bm)                                \
   int dolock = nr_attach > 1;                  \
   if (dolock) _glthread_LOCK_MUTEX(bm->mutex)

#define UNLOCK(bm)                              \
   if (dolock) _glthread_UNLOCK_MUTEX(bm->mutex)
static GLboolean alloc_from_pool( struct intel_context *intel,
                                  unsigned pool_nr,
                                  struct buffer *buf )
{
   struct bufmgr *bm = intel->bm;
   struct pool *pool = &bm->pool[pool_nr];
   struct block *block = (struct block *)calloc(sizeof *block, 1);
   GLuint sz, align = (1<<buf->alignment);

   if (!block)
      return GL_FALSE;

   sz = (buf->size + align-1) & ~(align-1);

   block->mem = mmAllocMem(pool->heap, sz, buf->alignment, 0);
   if (!block->mem) {
      free(block);
      return GL_FALSE;
   }

   make_empty_list(block);

   /* Insert at head or at tail???
    */
   insert_at_tail(&pool->lru, block);

   block->pool = pool;
   block->virtual = pool->virtual + block->mem->ofs;
   block->buf = buf;

   buf->block = block;

   return GL_TRUE;
}
/* Release the card storage associated with buf:
 */
static void free_block( struct intel_context *intel, struct block *block )
{
   DBG("free block %p\n", block);

   if (!block)
      return;

   if (block->referenced) {
      _mesa_printf("tried to free block on referenced list\n");
   }
   else if (block->on_hardware) {
      block->buf = NULL;
      intel->bm->free_on_hardware += block->mem->size;
   }
   else if (block->fenced) {
      block->buf = NULL;
   }
   else {
      DBG("    - free immediately\n");
      remove_from_list(block);

      mmFreeMem(block->mem);
      free(block);
   }
}
static void alloc_backing_store( struct intel_context *intel, struct buffer *buf )
{
   assert(!buf->backing_store);
   assert(!(buf->flags & (BM_NO_EVICT|BM_NO_BACKING_STORE)));

   buf->backing_store = ALIGN_MALLOC(buf->size, 64);
}
static void free_backing_store( struct intel_context *intel, struct buffer *buf )
{
   assert(!(buf->flags & (BM_NO_EVICT|BM_NO_BACKING_STORE)));

   if (buf->backing_store) {
      ALIGN_FREE(buf->backing_store);
      buf->backing_store = NULL;
   }
}
static void set_dirty( struct intel_context *intel,
                       struct buffer *buf )
{
   if (buf->flags & BM_NO_BACKING_STORE)
      buf->invalidate_cb(intel, buf->invalidate_ptr);

   assert(!(buf->flags & BM_NO_EVICT));

   DBG("set_dirty - buf %d\n", buf->id);
   buf->dirty = 1;
}
static int evict_lru( struct intel_context *intel, GLuint max_fence, GLuint *pool )
{
   struct bufmgr *bm = intel->bm;
   struct block *block, *tmp;
   GLuint i;

   DBG("%s\n", __FUNCTION__);

   for (i = 0; i < bm->nr_pools; i++) {
      if (!(bm->pool[i].flags & BM_NO_EVICT)) {
         foreach_s(block, tmp, &bm->pool[i].lru) {

            if (block->buf &&
                (block->buf->flags & BM_NO_FENCE_SUBDATA))
               continue;

            if (block->fence && max_fence &&
                !FENCE_LTE(block->fence, max_fence))
               return 0;

            set_dirty(intel, block->buf);
            block->buf->block = NULL;

            free_block(intel, block);

            *pool = i;
            return 1;
         }
      }
   }

   return 0;
}
#define foreach_s_rev(ptr, t, list)   \
   for(ptr=(list)->prev,t=(ptr)->prev; list != ptr; ptr=t, t=(t)->prev)
static int evict_mru( struct intel_context *intel, GLuint *pool )
{
   struct bufmgr *bm = intel->bm;
   struct block *block, *tmp;
   GLuint i;

   DBG("%s\n", __FUNCTION__);

   for (i = 0; i < bm->nr_pools; i++) {
      if (!(bm->pool[i].flags & BM_NO_EVICT)) {
         foreach_s_rev(block, tmp, &bm->pool[i].lru) {

            if (block->buf &&
                (block->buf->flags & BM_NO_FENCE_SUBDATA))
               continue;

            set_dirty(intel, block->buf);
            block->buf->block = NULL;

            free_block(intel, block);

            *pool = i;
            return 1;
         }
      }
   }

   return 0;
}
static int check_fenced( struct intel_context *intel )
{
   struct bufmgr *bm = intel->bm;
   struct block *block, *tmp;
   int ret = 0;

   foreach_s(block, tmp, &bm->fenced ) {
      assert(block->fenced);

      if (bmTestFence(intel, block->fence)) {

         block->fenced = 0;

         if (!block->buf) {
            DBG("delayed free: offset %x sz %x\n", block->mem->ofs, block->mem->size);
            remove_from_list(block);
            mmFreeMem(block->mem);
            free(block);
         }
         else {
            DBG("return to lru: offset %x sz %x\n", block->mem->ofs, block->mem->size);
            move_to_tail(&block->pool->lru, block);
         }

         ret = 1;
      }
      else {
         /* Blocks are ordered by fence, so if one fails, all from
          * here will fail also:
          */
         break;
      }
   }

   /* Also check the referenced list:
    */
   foreach_s(block, tmp, &bm->referenced ) {
      if (block->fenced &&
          bmTestFence(intel, block->fence)) {
         block->fenced = 0;
      }
   }

   DBG("%s: %d\n", __FUNCTION__, ret);
   return ret;
}
static void fence_blocks( struct intel_context *intel,
                          unsigned fence )
{
   struct bufmgr *bm = intel->bm;
   struct block *block, *tmp;

   foreach_s (block, tmp, &bm->on_hardware) {
      DBG("Fence block %p (sz 0x%x buf %p) with fence %d\n", block,
          block->mem->size, block->buf, fence);

      block->fence = fence;

      block->on_hardware = 0;
      block->fenced = 1;

      /* Move to tail of pending list here
       */
      move_to_tail(&bm->fenced, block);
   }

   /* Also check the referenced list:
    */
   foreach_s (block, tmp, &bm->referenced) {
      if (block->on_hardware) {
         DBG("Fence block %p (sz 0x%x buf %p) with fence %d\n", block,
             block->mem->size, block->buf, fence);

         block->fence = fence;
         block->on_hardware = 0;
         block->fenced = 1;
      }
   }

   bm->last_fence = fence;
   assert(is_empty_list(&bm->on_hardware));
}
static GLboolean alloc_block( struct intel_context *intel,
                              struct buffer *buf )
{
   struct bufmgr *bm = intel->bm;
   GLuint i;

   assert(intel->locked);

   DBG("%s 0x%x bytes (%s)\n", __FUNCTION__, buf->size, buf->name);

   for (i = 0; i < bm->nr_pools; i++) {
      if (!(bm->pool[i].flags & BM_NO_ALLOC) &&
          alloc_from_pool(intel, i, buf)) {

         DBG("%s --> 0x%x (sz %x)\n", __FUNCTION__,
             buf->block->mem->ofs, buf->block->mem->size);

         return GL_TRUE;
      }
   }

   DBG("%s --> fail\n", __FUNCTION__);
   return GL_FALSE;
}
static GLboolean evict_and_alloc_block( struct intel_context *intel,
                                        struct buffer *buf )
{
   GLuint pool;
   struct bufmgr *bm = intel->bm;

   assert(buf->block == NULL);

   /* Put a cap on the amount of free memory we'll allow to accumulate
    * before emitting a fence.
    */
   if (bm->free_on_hardware > 1 * 1024 * 1024) {
      DBG("fence for free space: %x\n", bm->free_on_hardware);
      bmSetFence(intel);
   }

   /* Search for already free memory:
    */
   if (alloc_block(intel, buf))
      return GL_TRUE;

   /* Look for memory that may have become free:
    */
   if (check_fenced(intel) &&
       alloc_block(intel, buf))
      return GL_TRUE;

   /* Look for memory blocks not used for >1 frame:
    */
   while (evict_lru(intel, intel->second_last_swap_fence, &pool))
      if (alloc_from_pool(intel, pool, buf))
         return GL_TRUE;

   /* If we're not thrashing, allow lru eviction to dig deeper into
    * recently used textures.  We'll probably be thrashing soon:
    */
   if (!intel->thrashing) {
      while (evict_lru(intel, 0, &pool))
         if (alloc_from_pool(intel, pool, buf))
            return GL_TRUE;
   }

   /* Keep thrashing counter alive?
    */
   if (intel->thrashing)
      intel->thrashing = 20;

   /* Wait on any already pending fences - here we are waiting for any
    * freed memory that has been submitted to hardware and fenced to
    * become available:
    */
   while (!is_empty_list(&bm->fenced)) {
      GLuint fence = bm->fenced.next->fence;
      bmFinishFence(intel, fence);

      if (alloc_block(intel, buf))
         return GL_TRUE;
   }

   if (!is_empty_list(&bm->on_hardware)) {
      bmSetFence(intel);

      while (!is_empty_list(&bm->fenced)) {
         GLuint fence = bm->fenced.next->fence;
         bmFinishFence(intel, fence);
      }

      if (!intel->thrashing) {
         DBG("thrashing\n");
      }
      intel->thrashing = 20;

      if (alloc_block(intel, buf))
         return GL_TRUE;
   }

   while (evict_mru(intel, &pool))
      if (alloc_from_pool(intel, pool, buf))
         return GL_TRUE;

   DBG("%s 0x%x bytes failed\n", __FUNCTION__, buf->size);

   assert(is_empty_list(&bm->on_hardware));
   assert(is_empty_list(&bm->fenced));

   return GL_FALSE;
}
/***********************************************************************
 * Public functions
 */

/* The initialization functions are skewed in the fake implementation.
 * This call would be to attach to an existing manager, rather than to
 * create a local one.
 */
struct bufmgr *bm_fake_intel_Attach( struct intel_context *intel )
{
   _glthread_DECLARE_STATIC_MUTEX(initMutex);
   static struct bufmgr bm;

   /* This function needs a mutex of its own...
    */
   _glthread_LOCK_MUTEX(initMutex);

   if (nr_attach == 0) {
      _glthread_INIT_MUTEX(bm.mutex);

      make_empty_list(&bm.referenced);
      make_empty_list(&bm.fenced);
      make_empty_list(&bm.on_hardware);

      /* The context id of any of the share group.  This won't be used
       * in communication with the kernel, so it doesn't matter if
       * this context is eventually deleted.
       */
      bm.ctxId = intel->hHWContext;
   }

   nr_attach++;

   _glthread_UNLOCK_MUTEX(initMutex);

   return &bm;
}
/* The virtual pointer would go away in a true implementation.
 */
int bmInitPool( struct intel_context *intel,
                unsigned long low_offset,
                void *low_virtual,
                unsigned long size,
                unsigned flags)
{
   struct bufmgr *bm = intel->bm;
   int retval = 0;

   LOCK(bm);
   {
      GLuint i;

      for (i = 0; i < bm->nr_pools; i++) {
         if (bm->pool[i].low_offset == low_offset &&
             bm->pool[i].size == size) {
            retval = i;
            goto out;
         }
      }

      if (bm->nr_pools >= BM_POOL_MAX) {
         retval = -1;
         goto out;
      }

      i = bm->nr_pools++;

      DBG("bmInitPool %d low_offset %x sz %x\n",
          i, low_offset, size);

      bm->pool[i].low_offset = low_offset;
      bm->pool[i].size = size;
      bm->pool[i].heap = mmInit( low_offset, size );
      bm->pool[i].virtual = low_virtual - low_offset;
      bm->pool[i].flags = flags;

      make_empty_list(&bm->pool[i].lru);

      retval = i;
   }
 out:
   UNLOCK(bm);
   return retval;
}
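
/* Minimal usage sketch of pool setup during driver init.  The offsets,
 * sizes and CPU mappings below are made-up placeholders; only the
 * bm_fake_intel_Attach()/bmInitPool() calls themselves come from this
 * file, and real drivers derive these values from the DRI screen setup.
 */
static void example_init_pools( struct intel_context *intel,
                                void *agp_virtual,    /* hypothetical CPU mapping */
                                void *front_virtual ) /* hypothetical CPU mapping */
{
   /* Attach to (or create) the shared buffer manager first. */
   intel->bm = bm_fake_intel_Attach( intel );

   /* A general pool the manager may evict from and allocate in freely. */
   bmInitPool( intel, 0x01000000, agp_virtual, 8 * 1024 * 1024, 0 );

   /* A fixed pool for static (front/back/depth) buffers that must not move. */
   bmInitPool( intel, 0x00000000, front_virtual, 2 * 1024 * 1024,
               BM_NO_EVICT | BM_NO_MOVE | BM_NO_ALLOC );
}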
static struct buffer *do_GenBuffer(struct intel_context *intel, const char *name, int align)
{
   struct bufmgr *bm = intel->bm;
   struct buffer *buf = calloc(sizeof(*buf), 1);

   buf->id = ++bm->buf_nr;
   buf->name = name;
   buf->alignment = align;
   buf->flags = BM_MEM_AGP|BM_MEM_VRAM|BM_MEM_LOCAL;

   return buf;
}
void *bmFindVirtual( struct intel_context *intel,
                     unsigned int offset,
                     size_t sz )
{
   struct bufmgr *bm = intel->bm;
   int i;

   for (i = 0; i < bm->nr_pools; i++)
      if (offset >= bm->pool[i].low_offset &&
          offset + sz <= bm->pool[i].low_offset + bm->pool[i].size)
         return bm->pool[i].virtual + offset;

   return NULL;
}
void bmGenBuffers(struct intel_context *intel,
                  const char *name, unsigned n,
                  struct buffer **buffers,
                  int align )
{
   struct bufmgr *bm = intel->bm;
   LOCK(bm);
   {
      unsigned i;

      for (i = 0; i < n; i++)
         buffers[i] = do_GenBuffer(intel, name, align);
   }
   UNLOCK(bm);
}
void bmDeleteBuffers(struct intel_context *intel, unsigned n, struct buffer **buffers)
{
   struct bufmgr *bm = intel->bm;

   LOCK(bm);
   {
      unsigned i;

      for (i = 0; i < n; i++) {
         struct buffer *buf = buffers[i];

         if (buf && buf->block)
            free_block(intel, buf->block);

         if (buf)
            free(buf);
      }
   }
   UNLOCK(bm);
}
/* Hook to inform faked buffer manager about fixed-position
 * front,depth,back buffers.  These may move to a fully memory-managed
 * scheme, or they may continue to be managed as is.  It will probably
 * be useful to pass a fixed offset here one day.
 */
struct buffer *bmGenBufferStatic(struct intel_context *intel,
                                 unsigned pool )
{
   struct bufmgr *bm = intel->bm;
   struct buffer *buf;
   LOCK(bm);

   assert(bm->pool[pool].flags & BM_NO_EVICT);
   assert(bm->pool[pool].flags & BM_NO_MOVE);

   if (bm->pool[pool].static_buffer)
      buf = bm->pool[pool].static_buffer;
   else {
      buf = do_GenBuffer(intel, "static", 12);

      bm->pool[pool].static_buffer = buf;
      assert(!buf->block);

      buf->size = bm->pool[pool].size;
      buf->flags = bm->pool[pool].flags;
      buf->alignment = 12;

      if (!alloc_from_pool(intel, pool, buf))
         assert(0);
   }

   UNLOCK(bm);
   return buf;
}
static void wait_quiescent(struct intel_context *intel,
                           struct block *block)
{
   if (block->on_hardware) {
      assert(intel->bm->need_fence);
      bmSetFence(intel);
      assert(!block->on_hardware);
   }

   if (block->fenced) {
      bmFinishFence(intel, block->fence);
   }

   assert(!block->on_hardware);
   assert(!block->fenced);
}
/* If buffer size changes, free and reallocate.  Otherwise update in
 * place:
 */
int bmBufferData(struct intel_context *intel,
                 struct buffer *buf,
                 unsigned size,
                 const void *data,
                 unsigned flags )
{
   struct bufmgr *bm = intel->bm;
   int retval = 0;

   LOCK(bm);
   {
      DBG("bmBufferData %d sz 0x%x data: %p\n", buf->id, size, data);

      assert(!buf->mapped);

      if (buf->block) {
         struct block *block = buf->block;

         /* Optimistic check to see if we can reuse the block -- not
          * required for correctness:
          */
         if (block->fenced)
            check_fenced(intel);

         if (block->on_hardware ||
             block->fenced ||
             (buf->size && buf->size != size) ||
             (data == NULL)) {

            assert(!block->referenced);

            free_block(intel, block);
            buf->block = NULL;
            buf->dirty = 1;
         }
      }

      buf->size = size;
      if (buf->block) {
         assert (buf->block->mem->size >= size);
      }

      if (buf->flags & (BM_NO_BACKING_STORE|BM_NO_EVICT)) {

         assert(intel->locked || data == NULL);

         if (data != NULL) {
            if (!buf->block && !evict_and_alloc_block(intel, buf)) {
               bm->fail = 1;
               retval = -1;
               goto out;
            }

            wait_quiescent(intel, buf->block);

            DBG("bmBufferData %d offset 0x%x sz 0x%x\n",
                buf->id, buf->block->mem->ofs, size);

            assert(buf->block->virtual == buf->block->pool->virtual + buf->block->mem->ofs);

            do_memcpy(buf->block->virtual, data, size);
         }

         buf->dirty = 0;
      }
      else {
         DBG("%s - set buf %d dirty\n", __FUNCTION__, buf->id);
         set_dirty(intel, buf);
         free_backing_store(intel, buf);

         if (data != NULL) {
            alloc_backing_store(intel, buf);
            do_memcpy(buf->backing_store, data, size);
         }
      }
   }
 out:
   UNLOCK(bm);
   return retval;
}
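
/* Minimal usage sketch of buffer creation and upload, assuming the driver
 * is already attached and its pools are initialized.  The buffer name,
 * size and vertex payload are placeholders.
 */
static void example_upload( struct intel_context *intel )
{
   struct buffer *vb;
   static const float verts[12] = { 0 };     /* placeholder payload */

   /* One buffer, 64-byte (2^6) aligned. */
   bmGenBuffers(intel, "example-vb", 1, &vb, 6);

   /* Copy the data in; for an ordinary buffer it lands in malloc'ed
    * backing store and is uploaded when the buffer is next validated.
    */
   bmBufferData(intel, vb, sizeof(verts), verts, 0);

   /* ... use the buffer in some batches ... */

   bmDeleteBuffers(intel, 1, &vb);
}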
/* Update the buffer in place, in whatever space it is currently resident:
 */
int bmBufferSubData(struct intel_context *intel,
                    struct buffer *buf,
                    unsigned offset,
                    unsigned size,
                    const void *data )
{
   struct bufmgr *bm = intel->bm;
   int retval = 0;

   LOCK(bm);
   {
      DBG("bmBufferSubdata %d offset 0x%x sz 0x%x\n", buf->id, offset, size);

      assert(offset+size <= buf->size);

      if (buf->flags & (BM_NO_EVICT|BM_NO_BACKING_STORE)) {

         assert(intel->locked);

         if (!buf->block && !evict_and_alloc_block(intel, buf)) {
            bm->fail = 1;
            retval = -1;
            goto out;
         }

         if (!(buf->flags & BM_NO_FENCE_SUBDATA))
            wait_quiescent(intel, buf->block);

         do_memcpy(buf->block->virtual + offset, data, size);
      }
      else {
         DBG("%s - set buf %d dirty\n", __FUNCTION__, buf->id);
         set_dirty(intel, buf);

         if (buf->backing_store == NULL)
            alloc_backing_store(intel, buf);

         do_memcpy(buf->backing_store + offset, data, size);
      }
   }
 out:
   UNLOCK(bm);
   return retval;
}
unsigned bmBufferOffset(struct intel_context *intel,
                        struct buffer *buf)
{
   struct bufmgr *bm = intel->bm;
   unsigned retval = 0;

   LOCK(bm);
   {
      assert(intel->locked);

      if (!buf->block &&
          !evict_and_alloc_block(intel, buf)) {
         bm->fail = 1;
         retval = ~0;
      }
      else {
         assert(buf->block->buf == buf);

         DBG("Add buf %d (block %p, dirty %d) to referenced list\n", buf->id, buf->block,
             buf->dirty);

         move_to_tail(&bm->referenced, buf->block);
         buf->block->referenced = 1;

         retval = buf->block->mem->ofs;
      }
   }
   UNLOCK(bm);

   return retval;
}
/* Extract data from the buffer:
 */
void bmBufferGetSubData(struct intel_context *intel,
                        struct buffer *buf,
                        unsigned offset,
                        unsigned size,
                        void *data )
{
   struct bufmgr *bm = intel->bm;

   LOCK(bm);
   {
      DBG("bmBufferGetSubdata %d offset 0x%x sz 0x%x\n", buf->id, offset, size);

      if (buf->flags & (BM_NO_EVICT|BM_NO_BACKING_STORE)) {
         if (buf->block && size) {
            wait_quiescent(intel, buf->block);
            do_memcpy(data, buf->block->virtual + offset, size);
         }
      }
      else {
         if (buf->backing_store && size) {
            do_memcpy(data, buf->backing_store + offset, size);
         }
      }
   }
   UNLOCK(bm);
}
/* Return a pointer to whatever space the buffer is currently resident in:
 */
void *bmMapBuffer( struct intel_context *intel,
                   struct buffer *buf,
                   unsigned flags )
{
   struct bufmgr *bm = intel->bm;
   void *retval = NULL;

   LOCK(bm);
   {
      DBG("bmMapBuffer %d\n", buf->id);

      if (buf->mapped) {
         _mesa_printf("%s: already mapped\n", __FUNCTION__);
         retval = NULL;
      }
      else if (buf->flags & (BM_NO_BACKING_STORE|BM_NO_EVICT)) {

         assert(intel->locked);

         if (!buf->block && !evict_and_alloc_block(intel, buf)) {
            DBG("%s: alloc failed\n", __FUNCTION__);
            bm->fail = 1;
            retval = NULL;
         }
         else {
            assert(buf->block);

            if (!(buf->flags & BM_NO_FENCE_SUBDATA))
               wait_quiescent(intel, buf->block);

            buf->mapped = 1;
            retval = buf->block->virtual;
         }
      }
      else {
         DBG("%s - set buf %d dirty\n", __FUNCTION__, buf->id);
         set_dirty(intel, buf);

         if (buf->backing_store == 0)
            alloc_backing_store(intel, buf);

         buf->mapped = 1;
         retval = buf->backing_store;
      }
   }
   UNLOCK(bm);
   return retval;
}
void bmUnmapBuffer( struct intel_context *intel, struct buffer *buf )
{
   struct bufmgr *bm = intel->bm;

   LOCK(bm);
   {
      DBG("bmUnmapBuffer %d\n", buf->id);
      buf->mapped = 0;
   }
   UNLOCK(bm);
}
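
/* Minimal map/unmap sketch: write into whatever space the buffer currently
 * occupies.  The buffer and byte count are placeholders; for buffers in a
 * BM_NO_BACKING_STORE/BM_NO_EVICT pool the hardware lock must already be
 * held (see the assert in bmMapBuffer above).
 */
static void example_map_write( struct intel_context *intel,
                               struct buffer *buf,
                               const void *src, unsigned bytes )
{
   void *dst = bmMapBuffer(intel, buf, 0);

   if (dst) {
      do_memcpy(dst, src, bytes);
      bmUnmapBuffer(intel, buf);
   }
}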
/* This is the big hack that turns on BM_NO_BACKING_STORE.  Basically
 * says that an external party will maintain the backing store, eg
 * Mesa's local copy of texture data.
 */
void bmBufferSetInvalidateCB(struct intel_context *intel,
                             struct buffer *buf,
                             void (*invalidate_cb)( struct intel_context *, void *ptr ),
                             void *ptr,
                             GLboolean dont_fence_subdata)
{
   struct bufmgr *bm = intel->bm;

   LOCK(bm);
   {
      if (buf->backing_store)
         free_backing_store(intel, buf);

      buf->flags |= BM_NO_BACKING_STORE;

      if (dont_fence_subdata)
         buf->flags |= BM_NO_FENCE_SUBDATA;

      DBG("bmBufferSetInvalidateCB set buf %d dirty\n", buf->id);
      buf->dirty = 1;

      buf->invalidate_cb = invalidate_cb;
      buf->invalidate_ptr = ptr;

      /* Note that it is invalid right from the start.  Also note
       * invalidate_cb is called with the bufmgr locked, so cannot
       * itself make bufmgr calls.
       */
      invalidate_cb( intel, ptr );
   }
   UNLOCK(bm);
}
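
/* Sketch of how a client (eg a texture manager) might register an
 * invalidate callback.  The example_texture type and its fields are
 * hypothetical; the only rule taken from the code above is that the
 * callback must not call back into the buffer manager.
 */
struct example_texture {               /* hypothetical client object */
   struct buffer *buffer;
   GLboolean data_in_buffer;
};

static void example_invalidate( struct intel_context *intel, void *ptr )
{
   struct example_texture *tex = ptr;

   /* The card copy may be discarded at any point; remember that the
    * client's own copy is now the only valid one.
    */
   tex->data_in_buffer = GL_FALSE;
   (void) intel;
}

static void example_register_cb( struct intel_context *intel,
                                 struct example_texture *tex )
{
   bmBufferSetInvalidateCB(intel, tex->buffer,
                           example_invalidate, tex,
                           GL_FALSE /* do fence subdata uploads */);
}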
/* This is only protected against thread interactions by the DRI lock
 * and the policy of ensuring that all dma is flushed prior to
 * releasing that lock.  Otherwise you might have two threads building
 * up a list of buffers to validate at once.
 */
int bmValidateBuffers( struct intel_context *intel )
{
   struct bufmgr *bm = intel->bm;
   int retval = 0;

   LOCK(bm);
   {
      DBG("%s fail %d\n", __FUNCTION__, bm->fail);
      assert(intel->locked);

      if (!bm->fail) {
         struct block *block, *tmp;

         foreach_s(block, tmp, &bm->referenced) {
            struct buffer *buf = block->buf;

            DBG("Validate buf %d / block %p / dirty %d\n", buf->id, block, buf->dirty);

            /* Upload the buffer contents if necessary:
             */
            if (buf->dirty) {
               DBG("Upload dirty buf %d (%s) sz %d offset 0x%x\n", buf->id,
                   buf->name, buf->size, block->mem->ofs);

               assert(!(buf->flags & (BM_NO_BACKING_STORE|BM_NO_EVICT)));

               wait_quiescent(intel, buf->block);

               do_memcpy(buf->block->virtual,
                         buf->backing_store,
                         buf->size);

               buf->dirty = 0;
            }

            block->referenced = 0;
            block->on_hardware = 1;
            move_to_tail(&bm->on_hardware, block);
         }

         bm->need_fence = 1;
      }

      retval = bm->fail ? -1 : 0;
   }
   UNLOCK(bm);

   if (retval != 0)
      DBG("%s failed\n", __FUNCTION__);

   return retval;
}
void bmReleaseBuffers( struct intel_context *intel )
{
   struct bufmgr *bm = intel->bm;

   LOCK(bm);
   {
      struct block *block, *tmp;

      foreach_s (block, tmp, &bm->referenced) {

         DBG("remove block %p from referenced list\n", block);

         if (block->on_hardware) {
            /* Return to the on-hardware list.
             */
            move_to_tail(&bm->on_hardware, block);
         }
         else if (block->fenced) {
            struct block *s;

            /* Hmm - have to scan the fenced list to insert the
             * buffers in order.  This is O(nm), but rare and the
             * lists are short.
             */
            foreach (s, &bm->fenced) {
               if (FENCE_LTE(block->fence, s->fence))
                  break;
            }

            move_to_tail(s, block);
         }
         else {
            /* Return to the lru list:
             */
            move_to_tail(&block->pool->lru, block);
         }

         block->referenced = 0;
      }
   }
   UNLOCK(bm);
}
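
/* Sketch of one plausible per-batch sequence built on the calls above and
 * below: reference buffers while emitting state, validate, fence, release.
 * The actual command emission is elided and the helper name is made up;
 * error handling is reduced to skipping the fence on validation failure.
 */
static void example_emit_batch( struct intel_context *intel,
                                struct buffer *vb )
{
   unsigned offset, fence;

   LOCK_HARDWARE(intel);

   /* Referencing a buffer pins it for this batch and yields its offset. */
   offset = bmBufferOffset(intel, vb);
   (void) offset;                    /* would be written into the batch */

   /* Upload any dirty backing store and move blocks to the hardware list. */
   if (bmValidateBuffers(intel) == 0) {
      /* ... dispatch the batchbuffer here ... */

      fence = bmSetFence(intel);     /* fences everything on the hardware list */
      (void) fence;
   }

   bmReleaseBuffers(intel);
   UNLOCK_HARDWARE(intel);
}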
/* This functionality is used by the buffer manager, not really sure
 * if we need to be exposing it in this way, probably libdrm will
 * offer equivalent calls.
 *
 * For now they can stay, but will likely change/move before final:
 */
unsigned bmSetFence( struct intel_context *intel )
{
   assert(intel->locked);

   /* Emit MI_FLUSH here:
    */
   if (intel->bm->need_fence) {

      /* Emit a flush without using a batchbuffer.  Can't rely on the
       * batchbuffer at this level really.  Would really prefer that
       * the IRQ ioctl emitted the flush at the same time.
       */
      GLuint dword[2];
      dword[0] = intel->vtbl.flush_cmd();
      dword[1] = 0;
      intel_cmd_ioctl(intel, (char *)&dword, sizeof(dword));

      intel->bm->last_fence = intelEmitIrqLocked( intel );

      fence_blocks(intel, intel->bm->last_fence);

      intel->vtbl.note_fence(intel, intel->bm->last_fence);
      intel->bm->need_fence = 0;

      if (intel->thrashing) {
         intel->thrashing--;
         if (!intel->thrashing)
            DBG("not thrashing\n");
      }

      intel->bm->free_on_hardware = 0;
   }

   return intel->bm->last_fence;
}
unsigned bmSetFenceLock( struct intel_context *intel )
{
   unsigned last;

   LOCK_HARDWARE(intel);
   last = bmSetFence(intel);
   UNLOCK_HARDWARE(intel);

   return last;
}
unsigned bmLockAndFence( struct intel_context *intel )
{
   if (intel->bm->need_fence) {
      LOCK_HARDWARE(intel);
      bmSetFence(intel);
      UNLOCK_HARDWARE(intel);
   }

   return intel->bm->last_fence;
}
void bmFinishFence( struct intel_context *intel, unsigned fence )
{
   if (!bmTestFence(intel, fence)) {
      DBG("...wait on fence %d\n", fence);
      intelWaitIrq( intel, fence );
   }

   assert(bmTestFence(intel, fence));
   check_fenced(intel);
}
void bmFinishFenceLock( struct intel_context *intel, unsigned fence )
{
   LOCK_HARDWARE(intel);
   bmFinishFence(intel, fence);
   UNLOCK_HARDWARE(intel);
}
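
/* Minimal sketch of how a caller might use the fence calls above: emit a
 * fence after queueing work, poll it, and block only when the result is
 * actually needed.  The queued work itself is elided.
 */
static void example_fence_usage( struct intel_context *intel )
{
   unsigned fence;

   /* ... queue rendering that reads/writes managed buffers ... */

   /* Emits MI_FLUSH + IRQ if anything is outstanding and fences the
    * blocks currently on the hardware list.
    */
   fence = bmLockAndFence(intel);

   /* Cheap, non-blocking check against the last dispatched sequence. */
   if (!bmTestFence(intel, fence)) {
      /* Block until the fence has passed, then reclaim delayed frees. */
      bmFinishFenceLock(intel, fence);
   }
}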
/* Specifically ignore texture memory sharing.
 *  -- just evict everything
 *  -- and wait for idle
 */
void bm_fake_NotifyContendedLockTake( struct intel_context *intel )
{
   struct bufmgr *bm = intel->bm;

   LOCK(bm);
   {
      struct block *block, *tmp;
      GLuint i;

      assert(is_empty_list(&bm->referenced));

      bm->need_fence = 1;
      bmFinishFence(intel, bmSetFence(intel));

      assert(is_empty_list(&bm->fenced));
      assert(is_empty_list(&bm->on_hardware));

      for (i = 0; i < bm->nr_pools; i++) {
         if (!(bm->pool[i].flags & BM_NO_EVICT)) {
            foreach_s(block, tmp, &bm->pool[i].lru) {
               assert(bmTestFence(intel, block->fence));
               set_dirty(intel, block->buf);
            }
         }
      }
   }
   UNLOCK(bm);
}
void bmEvictAll( struct intel_context *intel )
{
   struct bufmgr *bm = intel->bm;

   LOCK(bm);
   {
      struct block *block, *tmp;
      GLuint i;

      DBG("%s\n", __FUNCTION__);

      assert(is_empty_list(&bm->referenced));

      bm->need_fence = 1;
      bmFinishFence(intel, bmSetFence(intel));

      assert(is_empty_list(&bm->fenced));
      assert(is_empty_list(&bm->on_hardware));

      for (i = 0; i < bm->nr_pools; i++) {
         if (!(bm->pool[i].flags & BM_NO_EVICT)) {
            foreach_s(block, tmp, &bm->pool[i].lru) {
               assert(bmTestFence(intel, block->fence));
               set_dirty(intel, block->buf);
               block->buf->block = NULL;

               free_block(intel, block);
            }
         }
      }
   }
   UNLOCK(bm);
}
GLboolean bmError( struct intel_context *intel )
{
   struct bufmgr *bm = intel->bm;
   GLboolean retval;

   LOCK(bm);
   {
      retval = bm->fail;
   }
   UNLOCK(bm);

   return retval;
}
GLuint bmCtxId( struct intel_context *intel )
{
   return intel->bm->ctxId;
}