/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/* Originally a fake version of the buffer manager so that we could
 * prototype changes in a driver fairly quickly; it has since been
 * fleshed out into a fully functional interim solution.
 *
 * Basically wraps the old style memory management in the new
 * programming interface, but is more expressive and avoids many of
 * the bugs in the old texture manager.
 */
#include "intel_context.h"
#include "intel_ioctl.h"
#include "intel_batchbuffer.h"

#include "bufmgr.h"
#include "mm.h"
#include "simple_list.h"
#define BM_POOL_MAX 8

#define BM_NO_BACKING_STORE 0x2000
#define BM_NO_FENCE_SUBDATA 0x4000
static int check_fenced( struct intel_context *intel );
static int nr_attach = 0;
/* Wrapper around mm.c's mem_block, which understands that you must
 * wait for fences to expire before memory can be freed.  This is
 * specific to our use of memcpy for uploads - an upload that was
 * processed through the command queue wouldn't need to care about
 * fences.
 */
struct block {
   struct block *next, *prev;
   struct pool *pool;           /* BM_MEM_AGP */
   struct mem_block *mem;       /* BM_MEM_AGP */

   unsigned referenced:1;
   unsigned on_hardware:1;
   unsigned fenced:1;

   unsigned fence;              /* BM_MEM_AGP, Split to read_fence, write_fence */

   struct buffer *buf;
   void *virtual;
};
struct buffer {
   unsigned id;                 /* debug only */
   const char *name;
   unsigned size;
   unsigned mapped:1;
   unsigned dirty:1;
   unsigned aub_dirty:1;
   unsigned alignment:13;
   unsigned flags;
   struct block *block;
   void *backing_store;
   void (*invalidate_cb)( struct intel_context *, void * );
   void *invalidate_ptr;
};
struct pool {
   unsigned size;
   unsigned low_offset;
   struct buffer *static_buffer;
   unsigned flags;
   struct mem_block *heap;
   void *virtual;
   struct block lru;            /* only allocated, non-fence-pending blocks here */
};
struct bufmgr {
   _glthread_Mutex mutex;       /**< for thread safety */
   struct pool pool[BM_POOL_MAX];
   unsigned nr_pools;

   unsigned buf_nr;             /* for generating ids */

   struct block referenced;     /* after bmBufferOffset */
   struct block on_hardware;    /* after bmValidateBuffers */
   struct block fenced;         /* after bmFenceBuffers (mi_flush, emit irq, write dword) */
                                /* then to pool->lru or free() */

   unsigned ctxId;
   unsigned last_fence;
   unsigned free_on_hardware;

   unsigned fail:1;
   unsigned need_fence:1;
};
#define MAXFENCE 0x7fffffff
static GLboolean FENCE_LTE( unsigned a, unsigned b )
{
   if (a == b)
      return GL_TRUE;

   if (a < b && b - a < (1<<24))
      return GL_TRUE;

   if (a > b && MAXFENCE - a + b < (1<<24))
      return GL_TRUE;

   return GL_FALSE;
}
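/* Example: fence counters wrap at MAXFENCE, so FENCE_LTE(0x7ffffff0, 0x10)
 * is true - fence 0x10 was emitted after 0x7ffffff0 even though it is
 * numerically smaller.  The (1<<24) window is the maximum distance two
 * live fences are assumed to ever be apart.
 */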
int bmTestFence( struct intel_context *intel, unsigned fence )
{
   /* Slight problem with wrap-around:
    */
   return fence == 0 || FENCE_LTE(fence, intel->sarea->last_dispatch);
}
#define LOCK(bm)                                \
   int dolock = nr_attach > 1;                  \
   if (dolock) _glthread_LOCK_MUTEX(bm->mutex)

#define UNLOCK(bm)                              \
   if (dolock) _glthread_UNLOCK_MUTEX(bm->mutex)
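/* Note that LOCK() declares the local 'dolock' which UNLOCK() then
 * reads: the two must bracket a single scope, and locking is skipped
 * entirely in the common single-context case (nr_attach <= 1).
 */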
static GLboolean alloc_from_pool( struct intel_context *intel,
                                  unsigned pool_nr,
                                  struct buffer *buf )
{
   struct bufmgr *bm = intel->bm;
   struct pool *pool = &bm->pool[pool_nr];
   struct block *block = (struct block *)calloc(sizeof *block, 1);
   GLuint sz, align = (1<<buf->alignment);

   if (!block)
      return GL_FALSE;

   sz = (buf->size + align-1) & ~(align-1);

   block->mem = mmAllocMem(pool->heap,
                           sz,
                           buf->alignment, 0);
   if (!block->mem) {
      free(block);
      return GL_FALSE;
   }

   make_empty_list(block);

   /* Insert at head or at tail???
    */
   insert_at_tail(&pool->lru, block);

   block->pool = pool;
   block->virtual = pool->virtual + block->mem->ofs;
   block->buf = buf;

   buf->block = block;

   return GL_TRUE;
}
/* Release the card storage associated with buf:
 */
static void free_block( struct intel_context *intel, struct block *block )
{
   DBG("free block %p\n", block);

   if (!block)
      return;

   check_fenced(intel);

   if (block->referenced) {
      _mesa_printf("tried to free block on referenced list\n");
      assert(0);
   }
   else if (block->on_hardware) {
      block->buf = NULL;
      intel->bm->free_on_hardware += block->mem->size;
   }
   else if (block->fenced) {
      block->buf = NULL;
   }
   else {
      DBG("   - free immediately\n");
      remove_from_list(block);

      mmFreeMem(block->mem);
      free(block);
   }
}
static void alloc_backing_store( struct intel_context *intel, struct buffer *buf )
{
   assert(!buf->backing_store);
   assert(!(buf->flags & (BM_NO_EVICT|BM_NO_BACKING_STORE)));

   buf->backing_store = ALIGN_MALLOC(buf->size, 64);
}
static void free_backing_store( struct intel_context *intel, struct buffer *buf )
{
   assert(!(buf->flags & (BM_NO_EVICT|BM_NO_BACKING_STORE)));

   if (buf->backing_store) {
      ALIGN_FREE(buf->backing_store);
      buf->backing_store = NULL;
   }
}
static void set_dirty( struct intel_context *intel,
                       struct buffer *buf )
{
   if (buf->flags & BM_NO_BACKING_STORE)
      buf->invalidate_cb(intel, buf->invalidate_ptr);

   assert(!(buf->flags & BM_NO_EVICT));

   DBG("set_dirty - buf %d\n", buf->id);
   buf->dirty = 1;
}
static int evict_lru( struct intel_context *intel, GLuint max_fence, GLuint *pool )
{
   struct bufmgr *bm = intel->bm;
   struct block *block, *tmp;
   int i;

   DBG("%s\n", __FUNCTION__);

   for (i = 0; i < bm->nr_pools; i++) {
      if (!(bm->pool[i].flags & BM_NO_EVICT)) {
         foreach_s(block, tmp, &bm->pool[i].lru) {

            if (block->buf &&
                (block->buf->flags & BM_NO_FENCE_SUBDATA))
               continue;

            if (block->fence && max_fence &&
                !FENCE_LTE(block->fence, max_fence))
               return 0;

            set_dirty(intel, block->buf);
            block->buf->block = NULL;

            free_block(intel, block);

            *pool = i;
            return 1;
         }
      }
   }

   return 0;
}
#define foreach_s_rev(ptr, t, list)   \
        for(ptr=(list)->prev,t=(ptr)->prev; list != ptr; ptr=t, t=(t)->prev)
static int evict_mru( struct intel_context *intel, GLuint *pool )
{
   struct bufmgr *bm = intel->bm;
   struct block *block, *tmp;
   int i;

   DBG("%s\n", __FUNCTION__);

   for (i = 0; i < bm->nr_pools; i++) {
      if (!(bm->pool[i].flags & BM_NO_EVICT)) {
         foreach_s_rev(block, tmp, &bm->pool[i].lru) {

            if (block->buf &&
                (block->buf->flags & BM_NO_FENCE_SUBDATA))
               continue;

            set_dirty(intel, block->buf);
            block->buf->block = NULL;

            free_block(intel, block);

            *pool = i;
            return 1;
         }
      }
   }

   return 0;
}
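/* evict_lru frees blocks from the least recently used end of each
 * pool's lru list, optionally refusing anything fenced beyond
 * max_fence; evict_mru walks the same lists from the tail instead,
 * sacrificing the most recently used blocks once the gentler
 * strategies in evict_and_alloc_block() below have failed.
 */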
static int check_fenced( struct intel_context *intel )
{
   struct bufmgr *bm = intel->bm;
   struct block *block, *tmp;
   int ret = 0;

   foreach_s(block, tmp, &bm->fenced) {
      assert(block->fenced);

      if (bmTestFence(intel, block->fence)) {

         block->fenced = 0;

         if (!block->buf) {
            DBG("delayed free: offset %x sz %x\n", block->mem->ofs, block->mem->size);
            remove_from_list(block);
            mmFreeMem(block->mem);
            free(block);
         }
         else {
            DBG("return to lru: offset %x sz %x\n", block->mem->ofs, block->mem->size);
            move_to_tail(&block->pool->lru, block);
         }

         ret = 1;
      }
      else {
         /* Blocks are ordered by fence, so if one fails, all from
          * here will fail also:
          */
         break;
      }
   }

   /* Also check the referenced list:
    */
   foreach_s(block, tmp, &bm->referenced) {
      if (block->fenced &&
          bmTestFence(intel, block->fence)) {
         block->fenced = 0;
         ret = 1;
      }
   }

   DBG("%s: %d\n", __FUNCTION__, ret);
   return ret;
}
static void fence_blocks( struct intel_context *intel,
                          unsigned fence )
{
   struct bufmgr *bm = intel->bm;
   struct block *block, *tmp;

   foreach_s (block, tmp, &bm->on_hardware) {
      DBG("Fence block %p (sz 0x%x buf %p) with fence %d\n", block,
          block->mem->size, block->buf, fence);

      block->fence = fence;

      block->on_hardware = 0;
      block->fenced = 1;

      /* Move to tail of pending list here
       */
      move_to_tail(&bm->fenced, block);
   }

   /* Also check the referenced list:
    */
   foreach_s (block, tmp, &bm->referenced) {
      if (block->on_hardware) {
         DBG("Fence block %p (sz 0x%x buf %p) with fence %d\n", block,
             block->mem->size, block->buf, fence);

         block->fence = fence;
         block->on_hardware = 0;
         block->fenced = 1;
      }
   }

   bm->last_fence = fence;
   assert(is_empty_list(&bm->on_hardware));
}
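/* Blocks enter bm->fenced in the order their fences were emitted, so
 * the list stays sorted by fence value - check_fenced() relies on this
 * when it stops scanning at the first fence that has not yet expired.
 */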
static GLboolean alloc_block( struct intel_context *intel,
                              struct buffer *buf )
{
   struct bufmgr *bm = intel->bm;
   int i;

   assert(intel->locked);

   DBG("%s 0x%x bytes (%s)\n", __FUNCTION__, buf->size, buf->name);

   for (i = 0; i < bm->nr_pools; i++) {
      if (!(bm->pool[i].flags & BM_NO_ALLOC) &&
          alloc_from_pool(intel, i, buf)) {

         DBG("%s --> 0x%x (sz %x)\n", __FUNCTION__,
             buf->block->mem->ofs, buf->block->mem->size);

         return GL_TRUE;
      }
   }

   DBG("%s --> fail\n", __FUNCTION__);
   return GL_FALSE;
}
static GLboolean evict_and_alloc_block( struct intel_context *intel,
                                        struct buffer *buf )
{
   GLuint pool;
   struct bufmgr *bm = intel->bm;

   assert(buf->block == NULL);

   /* Put a cap on the amount of free memory we'll allow to accumulate
    * before emitting a fence.
    */
   if (bm->free_on_hardware > 1 * 1024 * 1024) {
      DBG("fence for free space: %x\n", bm->free_on_hardware);
      bmSetFence(intel);
   }

   /* Search for already free memory:
    */
   if (alloc_block(intel, buf))
      return GL_TRUE;

   /* Look for memory that may have become free:
    */
   if (check_fenced(intel) &&
       alloc_block(intel, buf))
      return GL_TRUE;

   /* Look for memory blocks not used for >1 frame:
    */
   while (evict_lru(intel, intel->second_last_swap_fence, &pool))
      if (alloc_from_pool(intel, pool, buf))
         return GL_TRUE;

   /* If we're not thrashing, allow lru eviction to dig deeper into
    * recently used textures.  We'll probably be thrashing soon:
    */
   if (!intel->thrashing) {
      while (evict_lru(intel, 0, &pool))
         if (alloc_from_pool(intel, pool, buf))
            return GL_TRUE;
   }

   /* Keep thrashing counter alive?
    */
   if (intel->thrashing)
      intel->thrashing = 20;

   /* Wait on any already pending fences - here we are waiting for any
    * freed memory that has been submitted to hardware and fenced to
    * become available:
    */
   while (!is_empty_list(&bm->fenced)) {
      GLuint fence = bm->fenced.next->fence;
      bmFinishFence(intel, fence);

      if (alloc_block(intel, buf))
         return GL_TRUE;
   }

   if (!is_empty_list(&bm->on_hardware)) {
      bmSetFence(intel);

      while (!is_empty_list(&bm->fenced)) {
         GLuint fence = bm->fenced.next->fence;
         bmFinishFence(intel, fence);
      }

      if (!intel->thrashing) {
         DBG("thrashing\n");
      }
      intel->thrashing = 20;

      if (alloc_block(intel, buf))
         return GL_TRUE;
   }

   while (evict_mru(intel, &pool))
      if (alloc_from_pool(intel, pool, buf))
         return GL_TRUE;

   DBG("%s 0x%x bytes failed\n", __FUNCTION__, buf->size);

   assert(is_empty_list(&bm->on_hardware));
   assert(is_empty_list(&bm->fenced));

   return GL_FALSE;
}
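/* The strategies above escalate in rough order of cost: free space,
 * then memory released by expired fences, then blocks idle for more
 * than a frame, then any unfenced lru block, then waiting on pending
 * fences, then flushing the hardware, and finally mru eviction - at
 * which point the manager is thrashing and the counter says so.
 */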
/***********************************************************************
 * Public functions
 */

/* The initialization functions are skewed in the fake implementation.
 * This call would be to attach to an existing manager, rather than to
 * create a local one.
 */
struct bufmgr *bm_fake_intel_Attach( struct intel_context *intel )
{
   _glthread_DECLARE_STATIC_MUTEX(initMutex);
   static struct bufmgr bm;

   /* This function needs a mutex of its own...
    */
   _glthread_LOCK_MUTEX(initMutex);

   if (nr_attach == 0) {
      _glthread_INIT_MUTEX(bm.mutex);

      make_empty_list(&bm.referenced);
      make_empty_list(&bm.fenced);
      make_empty_list(&bm.on_hardware);

      /* The context id of any of the share group.  This won't be used
       * in communication with the kernel, so it doesn't matter if
       * this context is eventually deleted.
       */
      bm.ctxId = intel->hHWContext;
   }

   nr_attach++;

   _glthread_UNLOCK_MUTEX(initMutex);

   return &bm;
}
/* The virtual pointer would go away in a true implementation.
 */
int bmInitPool( struct intel_context *intel,
                unsigned long low_offset,
                void *low_virtual,
                unsigned long size,
                unsigned flags)
{
   struct bufmgr *bm = intel->bm;
   int retval = 0;
   int i;

   LOCK(bm);

   for (i = 0; i < bm->nr_pools; i++) {
      if (bm->pool[i].low_offset == low_offset &&
          bm->pool[i].size == size) {
         retval = i;
         goto out;
      }
   }

   if (bm->nr_pools >= BM_POOL_MAX)
      retval = -1;
   else {
      i = bm->nr_pools++;

      DBG("bmInitPool %d low_offset %x sz %x\n",
          i, low_offset, size);

      bm->pool[i].low_offset = low_offset;
      bm->pool[i].size = size;
      bm->pool[i].heap = mmInit( low_offset, size );
      bm->pool[i].virtual = low_virtual - low_offset;
      bm->pool[i].flags = flags;

      make_empty_list(&bm->pool[i].lru);

      retval = i;
   }

 out:
   UNLOCK(bm);
   return retval;
}
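/* Example (hypothetical values, not from this file): the driver would
 * register the fixed framebuffer aperture roughly like this, so that
 * statically placed front/back/depth buffers never move or get evicted:
 *
 *    bmInitPool(intel, offset, virtual, size,
 *               BM_NO_EVICT | BM_NO_MOVE | BM_NO_ALLOC);
 */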
static struct buffer *do_GenBuffer(struct intel_context *intel, const char *name, int align)
{
   struct bufmgr *bm = intel->bm;
   struct buffer *buf = calloc(sizeof(*buf), 1);

   buf->id = ++bm->buf_nr;
   buf->name = name;
   buf->alignment = align;
   buf->flags = BM_MEM_AGP|BM_MEM_VRAM|BM_MEM_LOCAL;

   return buf;
}
void *bmFindVirtual( struct intel_context *intel,
                     unsigned int offset,
                     size_t sz )
{
   struct bufmgr *bm = intel->bm;
   int i;

   for (i = 0; i < bm->nr_pools; i++)
      if (offset >= bm->pool[i].low_offset &&
          offset + sz <= bm->pool[i].low_offset + bm->pool[i].size)
         return bm->pool[i].virtual + offset;

   return NULL;
}
void bmGenBuffers(struct intel_context *intel,
                  const char *name, unsigned n,
                  struct buffer **buffers,
                  int align )
{
   struct bufmgr *bm = intel->bm;
   LOCK(bm);
   {
      int i;

      for (i = 0; i < n; i++)
         buffers[i] = do_GenBuffer(intel, name, align);
   }
   UNLOCK(bm);
}
void bmDeleteBuffers(struct intel_context *intel, unsigned n, struct buffer **buffers)
{
   struct bufmgr *bm = intel->bm;

   LOCK(bm);
   {
      unsigned i;

      for (i = 0; i < n; i++) {
         struct buffer *buf = buffers[i];

         if (buf && buf->block)
            free_block(intel, buf->block);

         if (buf)
            free(buf);
      }
   }
   UNLOCK(bm);
}
/* Hook to inform faked buffer manager about fixed-position
 * front,depth,back buffers.  These may move to a fully memory-managed
 * scheme, or they may continue to be managed as is.  It will probably
 * be useful to pass a fixed offset here one day.
 */
struct buffer *bmGenBufferStatic(struct intel_context *intel,
                                 unsigned pool )
{
   struct bufmgr *bm = intel->bm;
   struct buffer *buf;
   LOCK(bm);

   assert(bm->pool[pool].flags & BM_NO_EVICT);
   assert(bm->pool[pool].flags & BM_NO_MOVE);

   if (bm->pool[pool].static_buffer)
      buf = bm->pool[pool].static_buffer;
   else {
      buf = do_GenBuffer(intel, "static", 12);

      bm->pool[pool].static_buffer = buf;
      assert(!buf->block);

      buf->size = bm->pool[pool].size;
      buf->flags = bm->pool[pool].flags;

      if (!alloc_from_pool(intel, pool, buf))
         assert(0);
   }

   UNLOCK(bm);
   return buf;
}
static void wait_quiescent(struct intel_context *intel,
                           struct block *block)
{
   if (block->on_hardware) {
      assert(intel->bm->need_fence);
      bmSetFence(intel);
      assert(!block->on_hardware);
   }

   if (block->fenced) {
      bmFinishFence(intel, block->fence);
   }

   assert(!block->on_hardware);
   assert(!block->fenced);
}
/* If buffer size changes, free and reallocate.  Otherwise update in
 * place.
 */
int bmBufferData(struct intel_context *intel,
                 struct buffer *buf,
                 unsigned size,
                 const void *data,
                 unsigned flags )
{
   struct bufmgr *bm = intel->bm;
   int retval = 0;

   LOCK(bm);
   {
      DBG("bmBufferData %d sz 0x%x data: %p\n", buf->id, size, data);

      assert(!buf->mapped);

      if (buf->block) {
         struct block *block = buf->block;

         /* Optimistic check to see if we can reuse the block -- not
          * required for correctness:
          */
         if (block->fenced)
            check_fenced(intel);

         if (block->on_hardware ||
             block->fenced ||
             (buf->size && buf->size != size) ||
             (data == NULL)) {

            assert(!block->referenced);

            free_block(intel, block);
            buf->block = NULL;
            buf->dirty = 1;
         }
      }

      buf->size = size;
      if (buf->block)
         assert (buf->block->mem->size >= size);

      if (buf->flags & (BM_NO_BACKING_STORE|BM_NO_EVICT)) {

         assert(intel->locked || data == NULL);

         if (data != NULL) {
            if (!buf->block && !evict_and_alloc_block(intel, buf)) {
               bm->fail = 1;
               retval = -1;
               goto out;
            }

            wait_quiescent(intel, buf->block);

            DBG("bmBufferData %d offset 0x%x sz 0x%x\n",
                buf->id, buf->block->mem->ofs, size);

            assert(buf->block->virtual == buf->block->pool->virtual + buf->block->mem->ofs);

            do_memcpy(buf->block->virtual, data, size);
         }

         buf->dirty = 0;
      }
      else {
         DBG("%s - set buf %d dirty\n", __FUNCTION__, buf->id);
         set_dirty(intel, buf);
         free_backing_store(intel, buf);

         if (data != NULL) {
            alloc_backing_store(intel, buf);
            do_memcpy(buf->backing_store, data, size);
         }
      }
   }
 out:
   UNLOCK(bm);
   return retval;
}
/* Update the buffer in place, in whatever space it is currently resident:
 */
int bmBufferSubData(struct intel_context *intel,
                    struct buffer *buf,
                    unsigned offset,
                    unsigned size,
                    const void *data )
{
   struct bufmgr *bm = intel->bm;
   int retval = 0;

   if (size == 0)
      return 0;

   LOCK(bm);
   {
      DBG("bmBufferSubdata %d offset 0x%x sz 0x%x\n", buf->id, offset, size);

      assert(offset+size <= buf->size);

      if (buf->flags & (BM_NO_EVICT|BM_NO_BACKING_STORE)) {

         assert(intel->locked);

         if (!buf->block && !evict_and_alloc_block(intel, buf)) {
            bm->fail = 1;
            retval = -1;
            goto out;
         }

         if (!(buf->flags & BM_NO_FENCE_SUBDATA))
            wait_quiescent(intel, buf->block);

         do_memcpy(buf->block->virtual + offset, data, size);
      }
      else {
         DBG("%s - set buf %d dirty\n", __FUNCTION__, buf->id);
         set_dirty(intel, buf);

         if (buf->backing_store == NULL)
            alloc_backing_store(intel, buf);

         do_memcpy(buf->backing_store + offset, data, size);
      }
   }
 out:
   UNLOCK(bm);
   return retval;
}
int bmBufferDataAUB(struct intel_context *intel,
                    struct buffer *buf,
                    unsigned size,
                    const void *data,
                    unsigned flags,
                    unsigned aubtype,
                    unsigned aubsubtype )
{
   int retval = bmBufferData(intel, buf, size, data, flags);

   /* This only works because in this version of the buffer manager we
    * allocate all buffers statically in agp space and so can emit the
    * uploads to the aub file with the correct offsets as they happen.
    */
   if (retval == 0 && data && intel->aub_file) {

      if (buf->block && !buf->dirty) {
         intel->vtbl.aub_gtt_data(intel,
                                  buf->block->mem->ofs,
                                  buf->block->virtual,
                                  size,
                                  aubtype,
                                  aubsubtype);
         buf->aub_dirty = 0;
      }
   }

   return retval;
}
int bmBufferSubDataAUB(struct intel_context *intel,
                       struct buffer *buf,
                       unsigned offset,
                       unsigned size,
                       const void *data,
                       unsigned aubtype,
                       unsigned aubsubtype )
{
   int retval = bmBufferSubData(intel, buf, offset, size, data);

   /* This only works because in this version of the buffer manager we
    * allocate all buffers statically in agp space and so can emit the
    * uploads to the aub file with the correct offsets as they happen.
    */
   if (intel->aub_file) {
      if (retval == 0 && buf->block && !buf->dirty)
         intel->vtbl.aub_gtt_data(intel,
                                  buf->block->mem->ofs + offset,
                                  ((const char *)buf->block->virtual) + offset,
                                  size,
                                  aubtype,
                                  aubsubtype);
   }

   return retval;
}
void bmUnmapBufferAUB( struct intel_context *intel,
                       struct buffer *buf,
                       unsigned aubtype,
                       unsigned aubsubtype )
{
   bmUnmapBuffer(intel, buf);

   if (intel->aub_file) {
      /* Hack - exclude the framebuffer mappings.  If you removed
       * this, you'd get very big aubfiles, but you *would* be able to
       * see fallback rendering.
       */
      if (buf->block && !buf->dirty && buf->block->pool == &intel->bm->pool[0]) {
         buf->aub_dirty = 1;
      }
   }
}
unsigned bmBufferOffset(struct intel_context *intel,
                        struct buffer *buf)
{
   struct bufmgr *bm = intel->bm;
   unsigned retval = 0;

   LOCK(bm);
   {
      assert(intel->locked);

      if (!buf->block &&
          !evict_and_alloc_block(intel, buf)) {
         bm->fail = 1;
         retval = ~0;
      }
      else {
         assert(buf->block);
         assert(buf->block->buf == buf);

         DBG("Add buf %d (block %p, dirty %d) to referenced list\n", buf->id, buf->block,
             buf->dirty);

         move_to_tail(&bm->referenced, buf->block);
         buf->block->referenced = 1;

         retval = buf->block->mem->ofs;
      }
   }
   UNLOCK(bm);

   return retval;
}
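/* bmBufferOffset() is what puts a block on the referenced list: every
 * buffer whose offset is baked into the current batch must pass through
 * here, and the caller is expected to follow up with bmValidateBuffers()
 * before submission and bmReleaseBuffers() afterwards.
 */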
/* Extract data from the buffer:
 */
void bmBufferGetSubData(struct intel_context *intel,
                        struct buffer *buf,
                        unsigned offset,
                        unsigned size,
                        void *data )
{
   struct bufmgr *bm = intel->bm;

   LOCK(bm);
   {
      DBG("bmBufferGetSubData %d offset 0x%x sz 0x%x\n", buf->id, offset, size);

      if (buf->flags & (BM_NO_EVICT|BM_NO_BACKING_STORE)) {
         if (buf->block && size) {
            wait_quiescent(intel, buf->block);
            do_memcpy(data, buf->block->virtual + offset, size);
         }
      }
      else {
         if (buf->backing_store && size) {
            do_memcpy(data, buf->backing_store + offset, size);
         }
      }
   }
   UNLOCK(bm);
}
/* Return a pointer to whatever space the buffer is currently resident in:
 */
void *bmMapBuffer( struct intel_context *intel,
                   struct buffer *buf,
                   unsigned flags )
{
   struct bufmgr *bm = intel->bm;
   void *retval = NULL;

   LOCK(bm);
   {
      DBG("bmMapBuffer %d\n", buf->id);

      if (buf->mapped) {
         _mesa_printf("%s: already mapped\n", __FUNCTION__);
         retval = NULL;
      }
      else if (buf->flags & (BM_NO_BACKING_STORE|BM_NO_EVICT)) {

         assert(intel->locked);

         if (!buf->block && !evict_and_alloc_block(intel, buf)) {
            DBG("%s: alloc failed\n", __FUNCTION__);
            bm->fail = 1;
            retval = NULL;
         }
         else {
            assert(buf->block);

            if (!(buf->flags & BM_NO_FENCE_SUBDATA))
               wait_quiescent(intel, buf->block);

            buf->mapped = 1;
            retval = buf->block->virtual;
         }
      }
      else {
         DBG("%s - set buf %d dirty\n", __FUNCTION__, buf->id);
         set_dirty(intel, buf);

         if (buf->backing_store == 0)
            alloc_backing_store(intel, buf);

         buf->mapped = 1;
         retval = buf->backing_store;
      }
   }
   UNLOCK(bm);
   return retval;
}
void bmUnmapBuffer( struct intel_context *intel, struct buffer *buf )
{
   struct bufmgr *bm = intel->bm;

   LOCK(bm);
   {
      DBG("bmUnmapBuffer %d\n", buf->id);
      buf->mapped = 0;
   }
   UNLOCK(bm);
}
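/* Sketch of typical use (illustrative only, not from the original
 * file); the same upload could be done in one shot with bmBufferData():
 *
 *    struct buffer *buf;
 *    bmGenBuffers(intel, "example", 1, &buf, 6);
 *    bmBufferData(intel, buf, sz, NULL, 0);     -- size only, no data yet
 *    ptr = bmMapBuffer(intel, buf, 0);
 *    memcpy(ptr, data, sz);                     -- fill whatever space
 *    bmUnmapBuffer(intel, buf);                    the buffer lives in
 */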
/* This is the big hack that turns on BM_NO_BACKING_STORE.  Basically
 * says that an external party will maintain the backing store, eg
 * Mesa's local copy of texture data.
 */
void bmBufferSetInvalidateCB(struct intel_context *intel,
                             struct buffer *buf,
                             void (*invalidate_cb)( struct intel_context *, void *ptr ),
                             void *ptr,
                             GLboolean dont_fence_subdata)
{
   struct bufmgr *bm = intel->bm;

   LOCK(bm);
   {
      if (buf->backing_store)
         free_backing_store(intel, buf);

      buf->flags |= BM_NO_BACKING_STORE;

      if (dont_fence_subdata)
         buf->flags |= BM_NO_FENCE_SUBDATA;

      DBG("bmBufferSetInvalidateCB set buf %d dirty\n", buf->id);
      buf->dirty = 1;
      buf->invalidate_cb = invalidate_cb;
      buf->invalidate_ptr = ptr;

      /* Note that it is invalid right from the start.  Also note
       * invalidate_cb is called with the bufmgr locked, so cannot
       * itself make bufmgr calls.
       */
      invalidate_cb( intel, ptr );
   }
   UNLOCK(bm);
}
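/* Hypothetical caller sketch (names invented for illustration): the
 * texture code would register its own image data as the backing store,
 * getting a callback when card memory is invalidated so it can
 * re-upload on the next validate:
 *
 *    bmBufferSetInvalidateCB(intel, t->buffer, intel_tex_invalidate_cb,
 *                            t, GL_FALSE);
 */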
/* This is only protected against thread interactions by the DRI lock
 * and the policy of ensuring that all dma is flushed prior to
 * releasing that lock.  Otherwise you might have two threads building
 * up a list of buffers to validate at once.
 */
int bmValidateBuffers( struct intel_context *intel )
{
   struct bufmgr *bm = intel->bm;
   int retval = 0;

   LOCK(bm);
   {
      DBG("%s fail %d\n", __FUNCTION__, bm->fail);
      assert(intel->locked);

      if (!bm->fail) {
         struct block *block, *tmp;

         foreach_s(block, tmp, &bm->referenced) {
            struct buffer *buf = block->buf;

            DBG("Validate buf %d / block %p / dirty %d\n", buf->id, block, buf->dirty);

            /* Upload the buffer contents if necessary:
             */
            if (buf->dirty) {
               DBG("Upload dirty buf %d (%s) sz %d offset 0x%x\n", buf->id,
                   buf->name, buf->size, block->mem->ofs);

               assert(!(buf->flags & (BM_NO_BACKING_STORE|BM_NO_EVICT)));

               wait_quiescent(intel, buf->block);

               do_memcpy(buf->block->virtual,
                         buf->backing_store,
                         buf->size);

               if (intel->aub_file) {
                  intel->vtbl.aub_gtt_data(intel,
                                           buf->block->mem->ofs,
                                           buf->backing_store,
                                           buf->size,
                                           0,
                                           0);
               }

               buf->dirty = 0;
               buf->aub_dirty = 0;
            }
            else if (buf->aub_dirty) {
               intel->vtbl.aub_gtt_data(intel,
                                        buf->block->mem->ofs,
                                        buf->block->virtual,
                                        buf->size,
                                        0,
                                        0);
               buf->aub_dirty = 0;
            }

            block->referenced = 0;
            block->on_hardware = 1;
            move_to_tail(&bm->on_hardware, block);
         }

         bm->need_fence = 1;
      }

      retval = bm->fail ? -1 : 0;
   }
   UNLOCK(bm);

   if (retval != 0)
      DBG("%s failed\n", __FUNCTION__);

   return retval;
}
void bmReleaseBuffers( struct intel_context *intel )
{
   struct bufmgr *bm = intel->bm;

   LOCK(bm);
   {
      struct block *block, *tmp;

      foreach_s (block, tmp, &bm->referenced) {

         DBG("remove block %p from referenced list\n", block);

         if (block->on_hardware) {
            /* Return to the on-hardware list.
             */
            move_to_tail(&bm->on_hardware, block);
         }
         else if (block->fenced) {
            struct block *s;

            /* Hmm - have to scan the fenced list to insert the
             * buffers in order.  This is O(nm), but rare and the
             * lists are short.
             */
            foreach (s, &bm->fenced) {
               if (FENCE_LTE(block->fence, s->fence))
                  break;
            }

            move_to_tail(s, block);
         }
         else {
            /* Return to the lru list:
             */
            move_to_tail(&block->pool->lru, block);
         }

         block->referenced = 0;
      }
   }
   UNLOCK(bm);
}
/* This functionality is used by the buffer manager, not really sure
 * if we need to be exposing it in this way, probably libdrm will
 * offer equivalent calls.
 *
 * For now they can stay, but will likely change/move before final:
 */
unsigned bmSetFence( struct intel_context *intel )
{
   assert(intel->locked);

   /* Emit MI_FLUSH here:
    */
   if (intel->bm->need_fence) {

      /* Emit a flush without using a batchbuffer.  Can't rely on the
       * batchbuffer at this level really.  Would really prefer that
       * the IRQ ioctl emitted the flush at the same time.
       */
      GLuint dword[2];
      dword[0] = intel->vtbl.flush_cmd();
      dword[1] = 0;
      intel_cmd_ioctl(intel, (char *)&dword, sizeof(dword));

      intel->bm->last_fence = intelEmitIrqLocked( intel );

      fence_blocks(intel, intel->bm->last_fence);

      intel->vtbl.note_fence(intel, intel->bm->last_fence);
      intel->bm->need_fence = 0;

      if (intel->thrashing) {
         intel->thrashing--;
         if (!intel->thrashing)
            DBG("not thrashing\n");
      }

      intel->bm->free_on_hardware = 0;
   }

   return intel->bm->last_fence;
}
unsigned bmSetFenceLock( struct intel_context *intel )
{
   unsigned last;
   LOCK_HARDWARE(intel);
   last = bmSetFence(intel);
   UNLOCK_HARDWARE(intel);
   return last;
}
unsigned bmLockAndFence( struct intel_context *intel )
{
   if (intel->bm->need_fence) {
      LOCK_HARDWARE(intel);
      bmSetFence(intel);
      UNLOCK_HARDWARE(intel);
   }

   return intel->bm->last_fence;
}
void bmFinishFence( struct intel_context *intel, unsigned fence )
{
   if (!bmTestFence(intel, fence)) {
      DBG("...wait on fence %d\n", fence);
      intelWaitIrq( intel, fence );
   }
   assert(bmTestFence(intel, fence));
   check_fenced(intel);
}
void bmFinishFenceLock( struct intel_context *intel, unsigned fence )
{
   LOCK_HARDWARE(intel);
   bmFinishFence(intel, fence);
   UNLOCK_HARDWARE(intel);
}
/* Specifically ignore texture memory sharing.
 *  -- just evict everything
 *  -- and wait for idle
 */
void bm_fake_NotifyContendedLockTake( struct intel_context *intel )
{
   struct bufmgr *bm = intel->bm;

   LOCK(bm);
   {
      struct block *block, *tmp;
      GLuint i;

      assert(is_empty_list(&bm->referenced));

      bm->need_fence = 1;
      bm->fail = 0;
      bmFinishFence(intel, bmSetFence(intel));

      assert(is_empty_list(&bm->fenced));
      assert(is_empty_list(&bm->on_hardware));

      for (i = 0; i < bm->nr_pools; i++) {
         if (!(bm->pool[i].flags & BM_NO_EVICT)) {
            foreach_s(block, tmp, &bm->pool[i].lru) {
               assert(bmTestFence(intel, block->fence));
               set_dirty(intel, block->buf);
            }
         }
      }
   }
   UNLOCK(bm);
}
void bmEvictAll( struct intel_context *intel )
{
   struct bufmgr *bm = intel->bm;

   LOCK(bm);
   {
      struct block *block, *tmp;
      GLuint i;

      DBG("%s\n", __FUNCTION__);

      assert(is_empty_list(&bm->referenced));

      bm->need_fence = 1;
      bm->fail = 0;
      bmFinishFence(intel, bmSetFence(intel));

      assert(is_empty_list(&bm->fenced));
      assert(is_empty_list(&bm->on_hardware));

      for (i = 0; i < bm->nr_pools; i++) {
         if (!(bm->pool[i].flags & BM_NO_EVICT)) {
            foreach_s(block, tmp, &bm->pool[i].lru) {
               assert(bmTestFence(intel, block->fence));
               set_dirty(intel, block->buf);
               block->buf->block = NULL;

               free_block(intel, block);
            }
         }
      }
   }
   UNLOCK(bm);
}
GLboolean bmError( struct intel_context *intel )
{
   struct bufmgr *bm = intel->bm;
   GLboolean retval;

   LOCK(bm);
   {
      retval = bm->fail;
   }
   UNLOCK(bm);

   return retval;
}
GLuint bmCtxId( struct intel_context *intel )
{
   return intel->bm->ctxId;
}