/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * Copyright 1999 Wittawat Yamwong
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * \file
 * Buffer manager using the old texture memory manager.
 *
 * \author José Fonseca <jrfonseca@tungstengraphics.com>
 */

#include "linked_list.h"

#include "pipe/p_defines.h"
#include "pipe/p_debug.h"
#include "pipe/p_thread.h"
#include "pipe/p_util.h"
#include "pb_buffer.h"
#include "pb_bufmgr.h"


/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)

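/**
 * A block within the managed memory range.
 *
 * Blocks form a circular, offset-ordered doubly-linked list whose head
 * (the "heap" returned by mmInit()) acts as a sentinel.  Free blocks are
 * additionally chained on the next_free/prev_free list.
 */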
struct mem_block
{
   struct mem_block *next, *prev;
   struct mem_block *next_free, *prev_free;
   struct mem_block *heap;
   int ofs, size;
   unsigned int free:1;
   unsigned int reserved:1;
};

#ifdef DEBUG
/**
 * For debugging purposes.
 */
static void
mmDumpMemInfo(const struct mem_block *heap)
{
   debug_printf("Memory heap %p:\n", (void *)heap);
   if (heap == 0) {
      debug_printf(" heap == 0\n");
   } else {
      const struct mem_block *p;

      for(p = heap->next; p != heap; p = p->next) {
         debug_printf(" Offset:%08x, Size:%08x, %c%c\n", p->ofs, p->size,
                      p->free ? 'F':'.',
                      p->reserved ? 'R':'.');
      }

      debug_printf("\nFree list:\n");

      for(p = heap->next_free; p != heap; p = p->next_free) {
         debug_printf(" FREE Offset:%08x, Size:%08x, %c%c\n", p->ofs, p->size,
                      p->free ? 'F':'.',
                      p->reserved ? 'R':'.');
      }

   }
   debug_printf("End of memory blocks\n");
}
#endif

/**
 * input: total size in bytes
 * return: a heap pointer if OK, NULL if error
 */
static struct mem_block *
mmInit(int ofs, int size)
{
   struct mem_block *heap, *block;

   if (size <= 0)
      return NULL;

   heap = CALLOC_STRUCT(mem_block);
   if (!heap)
      return NULL;

   block = CALLOC_STRUCT(mem_block);
   if (!block) {
      FREE(heap);
      return NULL;
   }

   heap->next = block;
   heap->prev = block;
   heap->next_free = block;
   heap->prev_free = block;

   block->heap = heap;
   block->next = heap;
   block->prev = heap;
   block->next_free = heap;
   block->prev_free = heap;

   block->ofs = ofs;
   block->size = size;
   block->free = 1;

   return heap;
}
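
/*
 * Example: a freshly initialized heap is the sentinel node plus a single
 * free block spanning the whole range.
 *
 *    struct mem_block *heap = mmInit(0, 1024*1024);
 *    assert(heap->next->free);
 *    assert(heap->next->ofs == 0 && heap->next->size == 1024*1024);
 */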


static struct mem_block *
SliceBlock(struct mem_block *p,
           int startofs, int size,
           int reserved, int alignment)
{
   struct mem_block *newblock;

   /* break left [p, newblock, p->next], then p = newblock */
   if (startofs > p->ofs) {
      newblock = CALLOC_STRUCT(mem_block);
      if (!newblock)
         return NULL;
      newblock->ofs = startofs;
      newblock->size = p->size - (startofs - p->ofs);
      newblock->free = 1;
      newblock->heap = p->heap;

      newblock->next = p->next;
      newblock->prev = p;
      p->next->prev = newblock;
      p->next = newblock;

      newblock->next_free = p->next_free;
      newblock->prev_free = p;
      p->next_free->prev_free = newblock;
      p->next_free = newblock;

      p->size -= newblock->size;
      p = newblock;
   }

   /* break right, also [p, newblock, p->next] */
   if (size < p->size) {
      newblock = CALLOC_STRUCT(mem_block);
      if (!newblock)
         return NULL;
      newblock->ofs = startofs + size;
      newblock->size = p->size - size;
      newblock->free = 1;
      newblock->heap = p->heap;

      newblock->next = p->next;
      newblock->prev = p;
      p->next->prev = newblock;
      p->next = newblock;

      newblock->next_free = p->next_free;
      newblock->prev_free = p;
      p->next_free->prev_free = newblock;
      p->next_free = newblock;

      p->size = size;
   }

   /* p = middle block */
   p->free = 0;

   /* Remove p from the free list:
    */
   p->next_free->prev_free = p->prev_free;
   p->prev_free->next_free = p->next_free;

   p->next_free = 0;
   p->prev_free = 0;

   p->reserved = reserved;
   return p;
}
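
/*
 * Illustration of the split performed above: carving 0x100 bytes at
 * offset 0x40 out of a free block [ofs=0x00, size=0x400] leaves
 *
 *    [0x00..0x40) free | [0x40..0x140) allocated | [0x140..0x400) free
 *
 * with the middle piece returned and unlinked from the free list.
 */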


/**
 * Allocate 'size' bytes with 2^align2 bytes alignment, restricting the
 * search to free memory after 'startSearch'.  (Depth and back buffers
 * should be in different 4MB banks to get better page hits if possible.)
 * input: size = size of block
 *        align2 = 2^align2 bytes alignment
 *        startSearch = linear offset from start of heap to begin search
 * return: pointer to the allocated block, 0 if error
 */
static struct mem_block *
mmAllocMem(struct mem_block *heap, int size, int align2, int startSearch)
{
   struct mem_block *p;
   const int mask = (1 << align2)-1;
   int startofs = 0;
   int endofs;

   if (!heap || align2 < 0 || size <= 0)
      return NULL;

   for (p = heap->next_free; p != heap; p = p->next_free) {
      assert(p->free);

      startofs = (p->ofs + mask) & ~mask;
      if ( startofs < startSearch ) {
         startofs = startSearch;
      }
      endofs = startofs+size;
      if (endofs <= (p->ofs+p->size))
         break;
   }

   if (p == heap)
      return NULL;

   assert(p->free);
   p = SliceBlock(p,startofs,size,0,mask+1);

   return p;
}
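
/*
 * A minimal usage sketch of the heap API above (the sizes are arbitrary):
 *
 *    struct mem_block *heap = mmInit(0, 4*1024*1024);
 *    struct mem_block *blk = mmAllocMem(heap, 8192, 12, 0);
 *    // blk->ofs is aligned to 4096 bytes (1 << 12)
 *    mmFreeMem(blk);
 *    mmDestroy(heap);
 */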


#if 0
/**
 * Find the block that starts at the given offset.
 * input: pointer to a heap, start offset
 * return: pointer to the block, NULL if not found
 */
static struct mem_block *
mmFindBlock(struct mem_block *heap, int start)
{
   struct mem_block *p;

   for (p = heap->next; p != heap; p = p->next) {
      if (p->ofs == start)
         return p;
   }

   return NULL;
}
#endif


static INLINE int
Join2Blocks(struct mem_block *p)
{
   /* XXX there should be some assertions here */

   /* NOTE: heap->free == 0 */

   if (p->free && p->next->free) {
      struct mem_block *q = p->next;

      assert(p->ofs + p->size == q->ofs);
      p->size += q->size;

      p->next = q->next;
      q->next->prev = p;

      q->next_free->prev_free = q->prev_free;
      q->prev_free->next_free = q->next_free;

      FREE(q);
      return 1;
   }
   return 0;
}

/**
 * Free a block.
 * input: pointer to a block
 * return: 0 if OK, -1 if error
 */
static int
mmFreeMem(struct mem_block *b)
{
   if (!b)
      return 0;

   if (b->free) {
      debug_printf("block already free\n");
      return -1;
   }
   if (b->reserved) {
      debug_printf("block is reserved\n");
      return -1;
   }

   b->free = 1;
   b->next_free = b->heap->next_free;
   b->prev_free = b->heap;
   b->next_free->prev_free = b;
   b->prev_free->next_free = b;

   Join2Blocks(b);
   if (b->prev != b->heap)
      Join2Blocks(b->prev);

   return 0;
}

/**
 * destroy MM
 */
static void
mmDestroy(struct mem_block *heap)
{
   struct mem_block *p;

   if (!heap)
      return;

   for (p = heap->next; p != heap; ) {
      struct mem_block *next = p->next;
      FREE(p);
      p = next;
   }

   FREE(heap);
}

struct mm_pb_manager
{
   struct pb_manager base;

   _glthread_Mutex mutex;

   size_t size;
   struct mem_block *heap;

   size_t align2;

   struct pb_buffer *buffer;
   void *map;
};


static INLINE struct mm_pb_manager *
mm_pb_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct mm_pb_manager *)mgr;
}


struct mm_buffer
{
   struct pb_buffer base;

   struct mm_pb_manager *mgr;

   struct mem_block *block;
};


static INLINE struct mm_buffer *
mm_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct mm_buffer *)buf;
}


static void
mm_buffer_destroy(struct pb_buffer *buf)
{
   struct mm_buffer *mm_buf = mm_buffer(buf);
   struct mm_pb_manager *mm = mm_buf->mgr;

   assert(buf->base.refcount == 0);

   _glthread_LOCK_MUTEX(mm->mutex);
   mmFreeMem(mm_buf->block);
   FREE(buf);
   _glthread_UNLOCK_MUTEX(mm->mutex);
}


static void *
mm_buffer_map(struct pb_buffer *buf,
              unsigned flags)
{
   struct mm_buffer *mm_buf = mm_buffer(buf);
   struct mm_pb_manager *mm = mm_buf->mgr;

   return (unsigned char *) mm->map + mm_buf->block->ofs;
}


static void
mm_buffer_unmap(struct pb_buffer *buf)
{
   /* No-op */
}


static void
mm_buffer_get_base_buffer(struct pb_buffer *buf,
                          struct pb_buffer **base_buf,
                          unsigned *offset)
{
   struct mm_buffer *mm_buf = mm_buffer(buf);
   struct mm_pb_manager *mm = mm_buf->mgr;
   pb_get_base_buffer(mm->buffer, base_buf, offset);
   *offset += mm_buf->block->ofs;
}


static const struct pb_vtbl
mm_buffer_vtbl = {
   mm_buffer_destroy,
   mm_buffer_map,
   mm_buffer_unmap,
   mm_buffer_get_base_buffer
};


static struct pb_buffer *
mm_bufmgr_create_buffer(struct pb_manager *mgr,
                        size_t size,
                        const struct pb_desc *desc)
{
   struct mm_pb_manager *mm = mm_pb_manager(mgr);
   struct mm_buffer *mm_buf;

   /* We don't handle alignments larger than the one initially set up */
   assert(desc->alignment % (1 << mm->align2) == 0);
   if(desc->alignment % (1 << mm->align2))
      return NULL;

   _glthread_LOCK_MUTEX(mm->mutex);

   mm_buf = CALLOC_STRUCT(mm_buffer);
   if (!mm_buf) {
      _glthread_UNLOCK_MUTEX(mm->mutex);
      return NULL;
   }

   mm_buf->base.base.refcount = 1;
   mm_buf->base.base.alignment = desc->alignment;
   mm_buf->base.base.usage = desc->usage;
   mm_buf->base.base.size = size;

   mm_buf->base.vtbl = &mm_buffer_vtbl;

   mm_buf->mgr = mm;

   mm_buf->block = mmAllocMem(mm->heap, size, mm->align2, 0);
   if(!mm_buf->block) {
      debug_printf("warning: heap full\n");
#if 0
      mmDumpMemInfo(mm->heap);
#endif

      mm_buf->block = mmAllocMem(mm->heap, size, mm->align2, 0);
      if(!mm_buf->block) {
         assert(0);
         FREE(mm_buf);
         _glthread_UNLOCK_MUTEX(mm->mutex);
         return NULL;
      }
   }

   /* Some sanity checks */
   assert(0 <= mm_buf->block->ofs && mm_buf->block->ofs < mm->size);
   assert(size <= mm_buf->block->size && mm_buf->block->ofs + mm_buf->block->size <= mm->size);

   _glthread_UNLOCK_MUTEX(mm->mutex);
   return SUPER(mm_buf);
}


static void
mm_bufmgr_destroy(struct pb_manager *mgr)
{
   struct mm_pb_manager *mm = mm_pb_manager(mgr);

   _glthread_LOCK_MUTEX(mm->mutex);

   mmDestroy(mm->heap);

   pb_unmap(mm->buffer);
   pb_reference(&mm->buffer, NULL);

   _glthread_UNLOCK_MUTEX(mm->mutex);

   FREE(mgr);
}


struct pb_manager *
mm_bufmgr_create_from_buffer(struct pb_buffer *buffer,
                             size_t size, size_t align2)
{
   struct mm_pb_manager *mm;

   if(!buffer)
      return NULL;

   mm = CALLOC_STRUCT(mm_pb_manager);
   if (!mm)
      return NULL;

   mm->base.create_buffer = mm_bufmgr_create_buffer;
   mm->base.destroy = mm_bufmgr_destroy;

   mm->size = size;
   mm->align2 = align2;   /* buffers are aligned to 2^align2 bytes */

   _glthread_INIT_MUTEX(mm->mutex);

   mm->buffer = buffer;

   mm->map = pb_map(mm->buffer,
                    PIPE_BUFFER_USAGE_CPU_READ |
                    PIPE_BUFFER_USAGE_CPU_WRITE);
   if(!mm->map)
      goto failure;

   mm->heap = mmInit(0, size);
   if (!mm->heap)
      goto failure;

   return SUPER(mm);

failure:
   if(mm->heap)
      mmDestroy(mm->heap);
   if(mm->map)
      pb_unmap(mm->buffer);
   if(mm)
      FREE(mm);
   return NULL;
}


struct pb_manager *
mm_bufmgr_create(struct pb_manager *provider,
                 size_t size, size_t align2)
{
   struct pb_buffer *buffer;
   struct pb_manager *mgr;
   struct pb_desc desc;

   assert(provider);
   assert(provider->create_buffer);

   memset(&desc, 0, sizeof(desc));
   desc.alignment = 1 << align2;

   buffer = provider->create_buffer(provider, size, &desc);
   if (!buffer)
      return NULL;

   mgr = mm_bufmgr_create_from_buffer(buffer, size, align2);
   if (!mgr) {
      pb_reference(&buffer, NULL);
      return NULL;
   }

   return mgr;
}
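
/*
 * Minimal usage sketch, assuming some underlying pb_manager "provider"
 * (e.g. one obtained from the winsys) is already available:
 *
 *    // Sub-allocate out of a single 16 MB, 4096-byte aligned buffer.
 *    struct pb_manager *mm = mm_bufmgr_create(provider, 16*1024*1024, 12);
 *    struct pb_desc desc;
 *    memset(&desc, 0, sizeof(desc));
 *    desc.alignment = 4096;
 *    struct pb_buffer *buf = mm->create_buffer(mm, 65536, &desc);
 *    ...
 *    pb_reference(&buf, NULL);   // release the sub-allocated buffer
 *    mm->destroy(mm);
 */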