1 /**************************************************************************
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * Copyright 1999 Wittawat Yamwong
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 **************************************************************************/
/**
 * \file
 * Buffer manager using the old texture memory manager.
 *
 * \author José Fonseca <jrfonseca@tungstengraphics.com>
 */
37 #include "linked_list.h"
39 #include "pipe/p_defines.h"
40 #include "pipe/p_debug.h"
41 #include "pipe/p_thread.h"
42 #include "pipe/p_util.h"
43 #include "pb_buffer.h"
44 #include "pb_bufmgr.h"
/**
 * Convenience macro (type safe): upcast a pointer to a derived struct to a
 * pointer to its embedded base member.
 *
 * The parameter was renamed from "__derived": identifiers beginning with a
 * double underscore are reserved for the implementation (C11 7.1.3).
 */
#define SUPER(derived) (&(derived)->base)
/**
 * A block of memory inside the managed heap.
 *
 * Each heap is a circular, offset-ordered list of blocks headed by a
 * sentinel node (the "heap" block).  Free blocks are additionally linked
 * into a second circular list through next_free/prev_free.
 */
struct mem_block
{
   struct mem_block *next, *prev;           /* all blocks, in offset order */
   struct mem_block *next_free, *prev_free; /* free blocks only */
   struct mem_block *heap;                  /* owning heap's sentinel node */
   int ofs;                                 /* byte offset from heap start */
   int size;                                /* size in bytes */
   unsigned int free:1;                     /* currently on the free list? */
   unsigned int reserved:1;                 /* reserved blocks refuse mmFreeMem */
};
66 * For debugging purposes.
69 mmDumpMemInfo(const struct mem_block
*heap
)
71 debug_printf("Memory heap %p:\n", (void *)heap
);
73 debug_printf(" heap == 0\n");
75 const struct mem_block
*p
;
77 for(p
= heap
->next
; p
!= heap
; p
= p
->next
) {
78 debug_printf(" Offset:%08x, Size:%08x, %c%c\n",p
->ofs
,p
->size
,
80 p
->reserved
? 'R':'.');
83 debug_printf("\nFree list:\n");
85 for(p
= heap
->next_free
; p
!= heap
; p
= p
->next_free
) {
86 debug_printf(" FREE Offset:%08x, Size:%08x, %c%c\n",p
->ofs
,p
->size
,
88 p
->reserved
? 'R':'.');
92 debug_printf("End of memory blocks\n");
98 * input: total size in bytes
99 * return: a heap pointer if OK, NULL if error
101 static struct mem_block
*
102 mmInit(int ofs
, int size
)
104 struct mem_block
*heap
, *block
;
109 heap
= CALLOC_STRUCT(mem_block
);
113 block
= CALLOC_STRUCT(mem_block
);
121 heap
->next_free
= block
;
122 heap
->prev_free
= block
;
127 block
->next_free
= heap
;
128 block
->prev_free
= heap
;
138 static struct mem_block
*
139 SliceBlock(struct mem_block
*p
,
140 int startofs
, int size
,
141 int reserved
, int alignment
)
143 struct mem_block
*newblock
;
145 /* break left [p, newblock, p->next], then p = newblock */
146 if (startofs
> p
->ofs
) {
147 newblock
= CALLOC_STRUCT(mem_block
);
150 newblock
->ofs
= startofs
;
151 newblock
->size
= p
->size
- (startofs
- p
->ofs
);
153 newblock
->heap
= p
->heap
;
155 newblock
->next
= p
->next
;
157 p
->next
->prev
= newblock
;
160 newblock
->next_free
= p
->next_free
;
161 newblock
->prev_free
= p
;
162 p
->next_free
->prev_free
= newblock
;
163 p
->next_free
= newblock
;
165 p
->size
-= newblock
->size
;
169 /* break right, also [p, newblock, p->next] */
170 if (size
< p
->size
) {
171 newblock
= CALLOC_STRUCT(mem_block
);
174 newblock
->ofs
= startofs
+ size
;
175 newblock
->size
= p
->size
- size
;
177 newblock
->heap
= p
->heap
;
179 newblock
->next
= p
->next
;
181 p
->next
->prev
= newblock
;
184 newblock
->next_free
= p
->next_free
;
185 newblock
->prev_free
= p
;
186 p
->next_free
->prev_free
= newblock
;
187 p
->next_free
= newblock
;
192 /* p = middle block */
195 /* Remove p from the free list:
197 p
->next_free
->prev_free
= p
->prev_free
;
198 p
->prev_free
->next_free
= p
->next_free
;
203 p
->reserved
= reserved
;
209 * Allocate 'size' bytes with 2^align2 bytes alignment,
210 * restrict the search to free memory after 'startSearch'
211 * depth and back buffers should be in different 4mb banks
212 * to get better page hits if possible
213 * input: size = size of block
214 * align2 = 2^align2 bytes alignment
215 * startSearch = linear offset from start of heap to begin search
216 * return: pointer to the allocated block, 0 if error
218 static struct mem_block
*
219 mmAllocMem(struct mem_block
*heap
, int size
, int align2
, int startSearch
)
222 const int mask
= (1 << align2
)-1;
226 if (!heap
|| align2
< 0 || size
<= 0)
229 for (p
= heap
->next_free
; p
!= heap
; p
= p
->next_free
) {
232 startofs
= (p
->ofs
+ mask
) & ~mask
;
233 if ( startofs
< startSearch
) {
234 startofs
= startSearch
;
236 endofs
= startofs
+size
;
237 if (endofs
<= (p
->ofs
+p
->size
))
245 p
= SliceBlock(p
,startofs
,size
,0,mask
+1);
253 * Free block starts at offset
254 * input: pointer to a heap, start offset
255 * return: pointer to a block
257 static struct mem_block
*
258 mmFindBlock(struct mem_block
*heap
, int start
)
262 for (p
= heap
->next
; p
!= heap
; p
= p
->next
) {
273 Join2Blocks(struct mem_block
*p
)
275 /* XXX there should be some assertions here */
277 /* NOTE: heap->free == 0 */
279 if (p
->free
&& p
->next
->free
) {
280 struct mem_block
*q
= p
->next
;
282 assert(p
->ofs
+ p
->size
== q
->ofs
);
288 q
->next_free
->prev_free
= q
->prev_free
;
289 q
->prev_free
->next_free
= q
->next_free
;
299 * Free block starts at offset
300 * input: pointer to a block
301 * return: 0 if OK, -1 if error
304 mmFreeMem(struct mem_block
*b
)
310 debug_printf("block already free\n");
314 debug_printf("block is reserved\n");
319 b
->next_free
= b
->heap
->next_free
;
320 b
->prev_free
= b
->heap
;
321 b
->next_free
->prev_free
= b
;
322 b
->prev_free
->next_free
= b
;
325 if (b
->prev
!= b
->heap
)
326 Join2Blocks(b
->prev
);
336 mmDestroy(struct mem_block
*heap
)
343 for (p
= heap
->next
; p
!= heap
; ) {
344 struct mem_block
*next
= p
->next
;
355 struct pb_manager base
;
357 _glthread_Mutex mutex
;
360 struct mem_block
*heap
;
364 struct pb_buffer
*buffer
;
369 static INLINE
struct mm_pb_manager
*
370 mm_pb_manager(struct pb_manager
*mgr
)
373 return (struct mm_pb_manager
*)mgr
;
379 struct pb_buffer base
;
381 struct mm_pb_manager
*mgr
;
383 struct mem_block
*block
;
387 static INLINE
struct mm_buffer
*
388 mm_buffer(struct pb_buffer
*buf
)
391 return (struct mm_buffer
*)buf
;
396 mm_buffer_destroy(struct pb_buffer
*buf
)
398 struct mm_buffer
*mm_buf
= mm_buffer(buf
);
399 struct mm_pb_manager
*mm
= mm_buf
->mgr
;
401 assert(buf
->base
.refcount
== 0);
403 _glthread_LOCK_MUTEX(mm
->mutex
);
404 mmFreeMem(mm_buf
->block
);
406 _glthread_UNLOCK_MUTEX(mm
->mutex
);
411 mm_buffer_map(struct pb_buffer
*buf
,
414 struct mm_buffer
*mm_buf
= mm_buffer(buf
);
415 struct mm_pb_manager
*mm
= mm_buf
->mgr
;
417 return (unsigned char *) mm
->map
+ mm_buf
->block
->ofs
;
/**
 * pb_vtbl unmap callback: a no-op, because the backing buffer remains
 * mapped for the manager's whole lifetime.
 */
static void
mm_buffer_unmap(struct pb_buffer *buf)
{
   /* No-op */
   (void)buf;
}
429 mm_buffer_get_base_buffer(struct pb_buffer
*buf
,
430 struct pb_buffer
**base_buf
,
433 struct mm_buffer
*mm_buf
= mm_buffer(buf
);
434 struct mm_pb_manager
*mm
= mm_buf
->mgr
;
435 pb_get_base_buffer(mm
->buffer
, base_buf
, offset
);
436 *offset
+= mm_buf
->block
->ofs
;
440 static const struct pb_vtbl
445 mm_buffer_get_base_buffer
449 static struct pb_buffer
*
450 mm_bufmgr_create_buffer(struct pb_manager
*mgr
,
452 const struct pb_desc
*desc
)
454 struct mm_pb_manager
*mm
= mm_pb_manager(mgr
);
455 struct mm_buffer
*mm_buf
;
457 /* We don't handle alignments larger then the one initially setup */
458 assert(desc
->alignment
% (1 << mm
->align2
) == 0);
459 if(desc
->alignment
% (1 << mm
->align2
))
462 _glthread_LOCK_MUTEX(mm
->mutex
);
464 mm_buf
= CALLOC_STRUCT(mm_buffer
);
466 _glthread_UNLOCK_MUTEX(mm
->mutex
);
470 mm_buf
->base
.base
.refcount
= 1;
471 mm_buf
->base
.base
.alignment
= desc
->alignment
;
472 mm_buf
->base
.base
.usage
= desc
->usage
;
473 mm_buf
->base
.base
.size
= size
;
475 mm_buf
->base
.vtbl
= &mm_buffer_vtbl
;
479 mm_buf
->block
= mmAllocMem(mm
->heap
, size
, mm
->align2
, 0);
481 debug_printf("warning: heap full\n");
483 mmDumpMemInfo(mm
->heap
);
486 mm_buf
->block
= mmAllocMem(mm
->heap
, size
, mm
->align2
, 0);
490 _glthread_UNLOCK_MUTEX(mm
->mutex
);
495 /* Some sanity checks */
496 assert(0 <= mm_buf
->block
->ofs
&& mm_buf
->block
->ofs
< mm
->size
);
497 assert(size
<= mm_buf
->block
->size
&& mm_buf
->block
->ofs
+ mm_buf
->block
->size
<= mm
->size
);
499 _glthread_UNLOCK_MUTEX(mm
->mutex
);
500 return SUPER(mm_buf
);
505 mm_bufmgr_destroy(struct pb_manager
*mgr
)
507 struct mm_pb_manager
*mm
= mm_pb_manager(mgr
);
509 _glthread_LOCK_MUTEX(mm
->mutex
);
513 pb_unmap(mm
->buffer
);
514 pb_reference(&mm
->buffer
, NULL
);
516 _glthread_UNLOCK_MUTEX(mm
->mutex
);
523 mm_bufmgr_create_from_buffer(struct pb_buffer
*buffer
,
524 size_t size
, size_t align2
)
526 struct mm_pb_manager
*mm
;
531 mm
= CALLOC_STRUCT(mm_pb_manager
);
535 mm
->base
.create_buffer
= mm_bufmgr_create_buffer
;
536 mm
->base
.destroy
= mm_bufmgr_destroy
;
539 mm
->align2
= align2
; /* 64-byte alignment */
541 _glthread_INIT_MUTEX(mm
->mutex
);
545 mm
->map
= pb_map(mm
->buffer
,
546 PIPE_BUFFER_USAGE_CPU_READ
|
547 PIPE_BUFFER_USAGE_CPU_WRITE
);
551 mm
->heap
= mmInit(0, size
);
561 pb_unmap(mm
->buffer
);
569 mm_bufmgr_create(struct pb_manager
*provider
,
570 size_t size
, size_t align2
)
572 struct pb_buffer
*buffer
;
573 struct pb_manager
*mgr
;
577 assert(provider
->create_buffer
);
579 memset(&desc
, 0, sizeof(desc
));
580 desc
.alignment
= 1 << align2
;
582 buffer
= provider
->create_buffer(provider
, size
, &desc
);
586 mgr
= mm_bufmgr_create_from_buffer(buffer
, size
, align2
);
588 pb_reference(&buffer
, NULL
);