/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/* Originally a fake version of the buffer manager, written so that we
 * could prototype the changes in a driver fairly quickly; it has since
 * been fleshed out into a fully functional interim solution.
 *
 * Basically wraps the old style memory management in the new
 * programming interface, but is more expressive and avoids many of
 * the bugs in the old texture manager.
 */
#include "mtypes.h"
#include "dri_bufmgr.h"
#include "drm.h"

#include "simple_list.h"
#include "mm.h"
#include "imports.h"

#define DBG(...)

/* Internal flags:
 */
#define BM_NO_BACKING_STORE   DRM_BO_FLAG_MEM_PRIV0
#define BM_NO_FENCE_SUBDATA   DRM_BO_FLAG_MEM_PRIV1

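/* As used below: a BM_NO_BACKING_STORE buffer keeps no system-memory
 * mirror, so when its block is evicted the owner is notified through
 * invalidate_cb and must regenerate the contents.  A BM_NO_FENCE_SUBDATA
 * buffer promises that CPU writes through a mapping need not wait for
 * outstanding fences (see the map path below).
 */
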
/* Wrapper around mm.c's mem_block, which understands that you must
 * wait for fences to expire before memory can be freed.  This is
 * specific to our use of memcpy for uploads - an upload that was
 * processed through the command queue wouldn't need to care about
 * fences.
 */
struct block {
   struct block *next, *prev;
   struct mem_block *mem;	/* BM_MEM_AGP */

   unsigned referenced:1;
   unsigned on_hardware:1;
   unsigned fenced:1;

   unsigned fence;		/* BM_MEM_AGP, Split to read_fence, write_fence */

   dri_bo *bo;
   void *virtual;
};

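/* Blocks migrate between the bufmgr's lists roughly as follows: lru
 * (allocated, idle) -> referenced (validated into the current batch) ->
 * on_hardware (contents uploaded) -> fenced (covered by an emitted
 * fence) -> back to lru, or freed outright, once that fence retires in
 * clear_fenced().
 */
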
typedef struct _bufmgr_fake {
   dri_bufmgr bufmgr;

   _glthread_Mutex mutex;	/**< for thread safety */

   unsigned long low_offset;
   unsigned long size;
   void *virtual;

   struct mem_block *heap;
   struct block lru;		/* only allocated, non-fence-pending blocks here */

   unsigned buf_nr;		/* for generating ids */

   struct block referenced;	/* after bmBufferOffset */
   struct block on_hardware;	/* after bmValidateBuffers */
   struct block fenced;		/* after bmFenceBuffers (mi_flush, emit irq, write dword) */
   /* then to bufmgr->lru or free() */

   unsigned int last_fence;

   unsigned fail:1;
   unsigned need_fence:1;
   GLboolean thrashing;

   /**
    * Driver callback to emit a fence, returning the cookie.
    *
    * Currently, this also requires that a write flush be emitted before
    * emitting the fence, but this should change.
    */
   unsigned int (*fence_emit)(void *private);
   /** Driver callback to wait for a fence cookie to have passed. */
   int (*fence_wait)(void *private, unsigned int fence_cookie);
   /** Driver-supplied argument to driver callbacks */
   void *driver_priv;
} dri_bufmgr_fake;

typedef struct _dri_bo_fake {
   dri_bo bo;

   unsigned id;			/* debug only */
   const char *name;

   unsigned dirty:1;
   unsigned int refcount;
   /* Flags may consist of any of the DRM_BO flags, plus
    * BM_NO_BACKING_STORE and BM_NO_FENCE_SUBDATA, which are the first two
    * driver private flags.
    */
   unsigned int flags;
   unsigned int alignment;
   GLboolean is_static;

   struct block *block;
   void *backing_store;
   void (*invalidate_cb)(dri_bufmgr *bufmgr, void *ptr);
   void *invalidate_ptr;
} dri_bo_fake;

typedef struct _dri_fence_fake {
   dri_fence fence;

   const char *name;
   unsigned int refcount;
   unsigned int fence_cookie;
   GLboolean flushed;
} dri_fence_fake;

static int clear_fenced(dri_bufmgr_fake *bufmgr_fake,
                        unsigned int fence_cookie);

#define MAXFENCE 0x7fffffff

static GLboolean FENCE_LTE( unsigned a, unsigned b )
{
   if (a == b)
      return GL_TRUE;

   if (a < b && b - a < (1<<24))
      return GL_TRUE;

   if (a > b && MAXFENCE - a + b < (1<<24))
      return GL_TRUE;

   return GL_FALSE;
}
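
/* Worked example of the wrap-around handling above (illustrative only):
 * fence cookies increase monotonically and wrap at MAXFENCE, and the
 * comparison treats cookies less than 1<<24 apart as ordered.  With
 * a = 0x7ffffff0 and b = 0x10, a > b numerically, but
 * MAXFENCE - a + b = 0x1f < 1<<24, so FENCE_LTE(a, b) is GL_TRUE:
 * a was emitted just before the counter wrapped and b just after.
 */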

static unsigned int
_fence_emit_internal(dri_bufmgr_fake *bufmgr_fake)
{
   bufmgr_fake->last_fence = bufmgr_fake->fence_emit(bufmgr_fake->driver_priv);
   return bufmgr_fake->last_fence;
}

static void
_fence_wait_internal(dri_bufmgr_fake *bufmgr_fake, unsigned int cookie)
{
   int ret;

   ret = bufmgr_fake->fence_wait(bufmgr_fake->driver_priv, cookie);
   if (ret != 0) {
      _mesa_printf("%s:%d: Error %d waiting for fence.\n",
                   __FILE__, __LINE__, ret);
      abort();
   }
   clear_fenced(bufmgr_fake, cookie);
}

static GLboolean
_fence_test(dri_bufmgr_fake *bufmgr_fake, unsigned fence)
{
   /* Slight problem with wrap-around:
    */
   return fence == 0 || FENCE_LTE(fence, bufmgr_fake->last_fence);
}

/**
 * Allocate a memory manager block for the buffer.
 */
static GLboolean
alloc_block(dri_bo *bo)
{
   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
   struct block *block = (struct block *)calloc(1, sizeof *block);
   unsigned int align_log2 = ffs(bo_fake->alignment) - 1;
   GLuint sz;

   if (!block)
      return GL_FALSE;

   sz = (bo->size + bo_fake->alignment - 1) & ~(bo_fake->alignment - 1);

   block->mem = mmAllocMem(bufmgr_fake->heap, sz, align_log2, 0);
   if (!block->mem) {
      free(block);
      return GL_FALSE;
   }

   make_empty_list(block);

   /* Insert at head or at tail???
    */
   insert_at_tail(&bufmgr_fake->lru, block);

   block->virtual = bufmgr_fake->virtual + block->mem->ofs;
   block->bo = bo;

   bo_fake->block = block;

   return GL_TRUE;
}

/* Release the card storage associated with buf:
 */
static void free_block(dri_bufmgr_fake *bufmgr_fake, struct block *block)
{
   DBG("free block %p\n", block);

   if (!block)
      return;

   if (block->referenced) {
      _mesa_printf("tried to free block on referenced list\n");
      assert(0);
   }
   else if (block->on_hardware) {
      block->bo = NULL;
   }
   else if (block->fenced) {
      block->bo = NULL;
   }
   else {
      DBG(" - free immediately\n");
      remove_from_list(block);

      mmFreeMem(block->mem);
      free(block);
   }
}

static void
alloc_backing_store(dri_bo *bo)
{
   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
   assert(!bo_fake->backing_store);
   assert(!(bo_fake->flags & (DRM_BO_FLAG_NO_EVICT|BM_NO_BACKING_STORE)));

   bo_fake->backing_store = ALIGN_MALLOC(bo->size, 64);
}

static void
free_backing_store(dri_bo *bo)
{
   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
   assert(!(bo_fake->flags & (DRM_BO_FLAG_NO_EVICT|BM_NO_BACKING_STORE)));

   if (bo_fake->backing_store) {
      ALIGN_FREE(bo_fake->backing_store);
      bo_fake->backing_store = NULL;
   }
}

static void
set_dirty(dri_bo *bo)
{
   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;

   if ((bo_fake->flags & BM_NO_BACKING_STORE) && bo_fake->invalidate_cb != NULL)
      bo_fake->invalidate_cb(&bufmgr_fake->bufmgr, bo_fake->invalidate_ptr);

   assert(!(bo_fake->flags & DRM_BO_FLAG_NO_EVICT));

   DBG("set_dirty - buf %d\n", bo_fake->id);
   bo_fake->dirty = 1;
}

static GLboolean
evict_lru(dri_bufmgr_fake *bufmgr_fake, GLuint max_fence)
{
   struct block *block, *tmp;

   DBG("%s\n", __FUNCTION__);

   foreach_s(block, tmp, &bufmgr_fake->lru) {
      dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo;

      if (bo_fake != NULL && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
         continue;

      if (block->fence && max_fence && !FENCE_LTE(block->fence, max_fence))
         return GL_FALSE;

      set_dirty(&bo_fake->bo);
      bo_fake->block = NULL;

      free_block(bufmgr_fake, block);
      return GL_TRUE;
   }

   return GL_FALSE;
}

#define foreach_s_rev(ptr, t, list)   \
        for(ptr=(list)->prev,t=(ptr)->prev; list != ptr; ptr=t, t=(t)->prev)

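/* foreach_s_rev walks the list tail-first, so evict_mru() below considers
 * the most-recently-used blocks first.
 */
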
static GLboolean
evict_mru(dri_bufmgr_fake *bufmgr_fake)
{
   struct block *block, *tmp;

   DBG("%s\n", __FUNCTION__);

   foreach_s_rev(block, tmp, &bufmgr_fake->lru) {
      dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo;

      if (bo_fake && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
         continue;

      set_dirty(&bo_fake->bo);
      bo_fake->block = NULL;

      free_block(bufmgr_fake, block);
      return GL_TRUE;
   }

   return GL_FALSE;
}

/**
 * Removes all objects from the fenced list older than the given fence.
 */
static int clear_fenced(dri_bufmgr_fake *bufmgr_fake,
                        unsigned int fence_cookie)
{
   struct block *block, *tmp;
   int ret = 0;

   foreach_s(block, tmp, &bufmgr_fake->fenced) {
      assert(block->fenced);

      if (_fence_test(bufmgr_fake, block->fence)) {

         block->fenced = 0;

         if (!block->bo) {
            DBG("delayed free: offset %x sz %x\n",
                block->mem->ofs, block->mem->size);
            remove_from_list(block);
            mmFreeMem(block->mem);
            free(block);
         }
         else {
            DBG("return to lru: offset %x sz %x\n",
                block->mem->ofs, block->mem->size);
            move_to_tail(&bufmgr_fake->lru, block);
         }

         ret = 1;
      }
      else {
         /* Blocks are ordered by fence, so if one fails, all from
          * here will fail also:
          */
         break;
      }
   }

   /* Also check the referenced list:
    */
   foreach_s(block, tmp, &bufmgr_fake->referenced) {
      if (block->fenced && _fence_test(bufmgr_fake, block->fence)) {
         block->fenced = 0;
      }
   }

   DBG("%s: %d\n", __FUNCTION__, ret);
   return ret;
}

static void fence_blocks(dri_bufmgr_fake *bufmgr_fake, unsigned fence)
{
   struct block *block, *tmp;

   foreach_s (block, tmp, &bufmgr_fake->on_hardware) {
      DBG("Fence block %p (sz 0x%x buf %p) with fence %d\n", block,
          block->mem->size, block->bo, fence);
      block->fence = fence;

      block->on_hardware = 0;
      block->fenced = 1;

      /* Move to tail of pending list here
       */
      move_to_tail(&bufmgr_fake->fenced, block);
   }

   /* Also check the referenced list:
    */
   foreach_s (block, tmp, &bufmgr_fake->referenced) {
      if (block->on_hardware) {
         DBG("Fence block %p (sz 0x%x buf %p) with fence %d\n", block,
             block->mem->size, block->bo, fence);

         block->fence = fence;
         block->on_hardware = 0;
         block->fenced = 1;
      }
   }

   assert(is_empty_list(&bufmgr_fake->on_hardware));
}

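/* evict_and_alloc_block() escalates through progressively more expensive
 * ways of finding space: plain allocation from free space first, then
 * eviction of idle LRU blocks, then waiting on pending fences to retire,
 * and finally eviction from the MRU end once the working set clearly
 * exceeds the aperture (the "thrashing" case).
 */
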
static GLboolean evict_and_alloc_block(dri_bo *bo)
{
   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;

   assert(bo_fake->block == NULL);

   /* Search for already free memory:
    */
   if (alloc_block(bo))
      return GL_TRUE;

   /* If we're not thrashing, allow lru eviction to dig deeper into
    * recently used textures.  We'll probably be thrashing soon:
    */
   if (!bufmgr_fake->thrashing) {
      while (evict_lru(bufmgr_fake, 0))
         if (alloc_block(bo))
            return GL_TRUE;
   }

   /* Keep thrashing counter alive?
    */
   if (bufmgr_fake->thrashing)
      bufmgr_fake->thrashing = 20;

   /* Wait on any already pending fences - here we are waiting for any
    * freed memory that has been submitted to hardware and fenced to
    * become available:
    */
   while (!is_empty_list(&bufmgr_fake->fenced)) {
      GLuint fence = bufmgr_fake->fenced.next->fence;
      _fence_wait_internal(bufmgr_fake, fence);

      if (alloc_block(bo))
         return GL_TRUE;
   }

   if (!is_empty_list(&bufmgr_fake->on_hardware)) {
      while (!is_empty_list(&bufmgr_fake->fenced)) {
         GLuint fence = bufmgr_fake->fenced.next->fence;
         _fence_wait_internal(bufmgr_fake, fence);
      }

      if (!bufmgr_fake->thrashing) {
         DBG("thrashing\n");
      }
      bufmgr_fake->thrashing = 20;

      if (alloc_block(bo))
         return GL_TRUE;
   }

   while (evict_mru(bufmgr_fake))
      if (alloc_block(bo))
         return GL_TRUE;

   DBG("%s 0x%lx bytes failed\n", __FUNCTION__, bo->size);

   assert(is_empty_list(&bufmgr_fake->on_hardware));
   assert(is_empty_list(&bufmgr_fake->fenced));

   return GL_FALSE;
}

/***********************************************************************
 * Public functions
 */

/**
 * Wait for hardware idle by emitting a fence and waiting for it.
 */
static void
dri_bufmgr_fake_wait_idle(dri_bufmgr_fake *bufmgr_fake)
{
   unsigned int cookie;

   cookie = _fence_emit_internal(bufmgr_fake);
   _fence_wait_internal(bufmgr_fake, cookie);
}

/* Specifically ignore texture memory sharing.
 *  -- just evict everything
 *  -- and wait for idle
 */
void
dri_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr)
{
   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;

   _glthread_LOCK_MUTEX(bufmgr_fake->mutex);
   {
      struct block *block, *tmp;

      assert(is_empty_list(&bufmgr_fake->referenced));

      bufmgr_fake->need_fence = 1;
      bufmgr_fake->fail = 0;

      /* Wait for hardware idle.  We don't know where acceleration has been
       * happening, so we'll need to wait anyway before letting anything get
       * put on the card again.
       */
      dri_bufmgr_fake_wait_idle(bufmgr_fake);

      assert(is_empty_list(&bufmgr_fake->fenced));
      assert(is_empty_list(&bufmgr_fake->on_hardware));

      foreach_s(block, tmp, &bufmgr_fake->lru) {
         assert(_fence_test(bufmgr_fake, block->fence));
         set_dirty(block->bo);
      }
   }
   _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex);
}

static dri_bo *
dri_fake_alloc(dri_bufmgr *bufmgr, const char *name,
               unsigned long size, unsigned int alignment, unsigned int flags,
               unsigned int hint)
{
   dri_bufmgr_fake *bufmgr_fake;
   dri_bo_fake *bo_fake;

   bufmgr_fake = (dri_bufmgr_fake *)bufmgr;

   bo_fake = calloc(1, sizeof(*bo_fake));
   if (!bo_fake)
      return NULL;

   bo_fake->bo.size = size;
   bo_fake->bo.offset = -1;
   bo_fake->bo.virtual = NULL;
   bo_fake->bo.bufmgr = bufmgr;
   bo_fake->refcount = 1;

   /* Alignment must be a power of two */
   assert((alignment & (alignment - 1)) == 0);
   if (alignment == 0)
      alignment = 1;
   bo_fake->alignment = alignment;
   bo_fake->id = ++bufmgr_fake->buf_nr;
   bo_fake->name = name;
   bo_fake->flags = flags;
   bo_fake->is_static = GL_FALSE;

   return &bo_fake->bo;
}

static dri_bo *
dri_fake_alloc_static(dri_bufmgr *bufmgr, const char *name,
                      unsigned long offset, unsigned long size, void *virtual,
                      unsigned int flags, unsigned int hint)
{
   dri_bufmgr_fake *bufmgr_fake;
   dri_bo_fake *bo_fake;

   bufmgr_fake = (dri_bufmgr_fake *)bufmgr;

   bo_fake = calloc(1, sizeof(*bo_fake));
   if (!bo_fake)
      return NULL;

   bo_fake->bo.size = size;
   bo_fake->bo.offset = offset;
   bo_fake->bo.virtual = virtual;
   bo_fake->bo.bufmgr = bufmgr;
   bo_fake->refcount = 1;
   bo_fake->name = name;
   bo_fake->flags = flags;
   bo_fake->is_static = GL_TRUE;

   return &bo_fake->bo;
}

static void
dri_fake_bo_reference(dri_bo *bo)
{
   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;

   _glthread_LOCK_MUTEX(bufmgr_fake->mutex);
   bo_fake->refcount++;
   _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex);
}

static void
dri_fake_bo_unreference(dri_bo *bo)
{
   dri_bufmgr_fake *bufmgr_fake;
   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;

   if (!bo)
      return;

   bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;

   _glthread_LOCK_MUTEX(bufmgr_fake->mutex);
   if (--bo_fake->refcount == 0) {
      _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex);
      /* No remaining references, so free it */
      if (bo_fake->block)
         free_block(bufmgr_fake, bo_fake->block);
      free_backing_store(bo);
      free(bo);
      return;
   }
   _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex);
}

/**
 * Map a buffer into bo->virtual, allocating either card memory space (if
 * BM_NO_BACKING_STORE or DRM_BO_FLAG_NO_EVICT is set) or backing store,
 * as necessary.
 */
static int
dri_fake_bo_map(dri_bo *bo, GLboolean write_enable)
{
   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;

   /* Static buffers are always mapped. */
   if (bo_fake->is_static)
      return 0;

   _glthread_LOCK_MUTEX(bufmgr_fake->mutex);
   {
      DBG("bmMapBuffer %d\n", bo_fake->id);

      if (bo->virtual != NULL) {
         _mesa_printf("%s: already mapped\n", __FUNCTION__);
         abort();
      }
      else if (bo_fake->flags & (BM_NO_BACKING_STORE|DRM_BO_FLAG_NO_EVICT)) {

         if (!bo_fake->block && !evict_and_alloc_block(bo)) {
            DBG("%s: alloc failed\n", __FUNCTION__);
            bufmgr_fake->fail = 1;
            _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex);
            return 1;
         }
         else {
            assert(bo_fake->block);
            bo_fake->dirty = 0;

            if (!(bo_fake->flags & BM_NO_FENCE_SUBDATA))
               dri_bufmgr_fake_wait_idle(bufmgr_fake);

            bo->virtual = bo_fake->block->virtual;
         }
      }
      else {
         if (write_enable)
            set_dirty(bo);

         if (bo_fake->backing_store == 0)
            alloc_backing_store(bo);

         bo->virtual = bo_fake->backing_store;
      }
   }
   _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex);
   return 0;
}

static int
dri_fake_bo_unmap(dri_bo *bo)
{
   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;

   if (bo == NULL)
      return 0;

   /* Static buffers are always mapped. */
   if (bo_fake->is_static)
      return 0;

   bo->virtual = NULL;

   return 0;
}

static int
dri_fake_bo_validate(dri_bo *bo, unsigned int flags)
{
   dri_bufmgr_fake *bufmgr_fake;
   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;

   /* XXX: Sanity-check whether we've already validated this one under
    * different flags.  See drmAddValidateItem().
    */

   bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;

   _glthread_LOCK_MUTEX(bufmgr_fake->mutex);
   {
      /* Allocate the card memory */
      if (!bo_fake->block && !evict_and_alloc_block(bo)) {
         bufmgr_fake->fail = 1;
         _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex);
         return -1;
      }

      assert(bo_fake->block);
      assert(bo_fake->block->bo == &bo_fake->bo);

      DBG("Add buf %d (block %p, dirty %d) to referenced list\n",
          bo_fake->id, bo_fake->block, bo_fake->dirty);

      move_to_tail(&bufmgr_fake->referenced, bo_fake->block);
      bo_fake->block->referenced = 1;

      bo->offset = bo_fake->block->mem->ofs;

      /* Upload the buffer contents if necessary */
      if (bo_fake->dirty) {
         DBG("Upload dirty buf %d (%s) sz %ld offset 0x%x\n", bo_fake->id,
             bo_fake->name, bo->size, bo_fake->block->mem->ofs);

         assert(!(bo_fake->flags &
                  (BM_NO_BACKING_STORE|DRM_BO_FLAG_NO_EVICT)));

         /* Actually, should be able to just wait for a fence on the memory,
          * which we would be tracking when we free it.  Waiting for idle is
          * a sufficiently large hammer for now.
          */
         dri_bufmgr_fake_wait_idle(bufmgr_fake);

         memcpy(bo_fake->block->virtual, bo_fake->backing_store, bo->size);

         bo_fake->block->referenced = 0;
         bo_fake->block->on_hardware = 1;
         move_to_tail(&bufmgr_fake->on_hardware, bo_fake->block);
      }

      bufmgr_fake->need_fence = 1;
   }
   _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex);

   return 0;
}

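/* After a set of validates, the driver is expected to create one fence
 * covering the whole batch (dri_fake_fence_validated below), which moves
 * every on_hardware block onto the fenced list with that fence cookie.
 */
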
static dri_fence *
dri_fake_fence_validated(dri_bufmgr *bufmgr, const char *name,
                         GLboolean flushed)
{
   dri_fence_fake *fence_fake;
   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
   unsigned int cookie;

   fence_fake = malloc(sizeof(*fence_fake));
   if (!fence_fake)
      return NULL;

   fence_fake->refcount = 1;
   fence_fake->name = name;
   fence_fake->flushed = flushed;
   fence_fake->fence.bufmgr = bufmgr;

   _glthread_LOCK_MUTEX(bufmgr_fake->mutex);
   cookie = _fence_emit_internal(bufmgr_fake);
   fence_fake->fence_cookie = cookie;
   fence_blocks(bufmgr_fake, cookie);
   _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex);

   return &fence_fake->fence;
}

static void
dri_fake_fence_reference(dri_fence *fence)
{
   dri_fence_fake *fence_fake = (dri_fence_fake *)fence;
   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)fence->bufmgr;

   _glthread_LOCK_MUTEX(bufmgr_fake->mutex);
   ++fence_fake->refcount;
   _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex);
}

static void
dri_fake_fence_unreference(dri_fence *fence)
{
   dri_fence_fake *fence_fake = (dri_fence_fake *)fence;
   dri_bufmgr_fake *bufmgr_fake;

   if (!fence)
      return;

   bufmgr_fake = (dri_bufmgr_fake *)fence->bufmgr;

   _glthread_LOCK_MUTEX(bufmgr_fake->mutex);
   if (--fence_fake->refcount == 0) {
      _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex);
      free(fence);
      return;
   }
   _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex);
}

static void
dri_fake_fence_wait(dri_fence *fence)
{
   dri_fence_fake *fence_fake = (dri_fence_fake *)fence;
   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)fence->bufmgr;

   _glthread_LOCK_MUTEX(bufmgr_fake->mutex);
   _fence_wait_internal(bufmgr_fake, fence_fake->fence_cookie);
   _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex);
}

dri_bufmgr *
dri_bufmgr_fake_init(unsigned long low_offset, void *low_virtual,
                     unsigned long size,
                     unsigned int (*fence_emit)(void *private),
                     int (*fence_wait)(void *private, unsigned int cookie),
                     void *driver_priv)
{
   dri_bufmgr_fake *bufmgr_fake;

   bufmgr_fake = calloc(1, sizeof(*bufmgr_fake));
   if (!bufmgr_fake)
      return NULL;

   /* Initialize allocator */
   make_empty_list(&bufmgr_fake->referenced);
   make_empty_list(&bufmgr_fake->fenced);
   make_empty_list(&bufmgr_fake->on_hardware);
   make_empty_list(&bufmgr_fake->lru);

   bufmgr_fake->low_offset = low_offset;
   bufmgr_fake->virtual = low_virtual;
   bufmgr_fake->size = size;
   bufmgr_fake->heap = mmInit(low_offset, size);

   _glthread_INIT_MUTEX(bufmgr_fake->mutex);

   /* Hook in methods */
   bufmgr_fake->bufmgr.bo_alloc = dri_fake_alloc;
   bufmgr_fake->bufmgr.bo_alloc_static = dri_fake_alloc_static;
   bufmgr_fake->bufmgr.bo_reference = dri_fake_bo_reference;
   bufmgr_fake->bufmgr.bo_unreference = dri_fake_bo_unreference;
   bufmgr_fake->bufmgr.bo_map = dri_fake_bo_map;
   bufmgr_fake->bufmgr.bo_unmap = dri_fake_bo_unmap;
   bufmgr_fake->bufmgr.bo_validate = dri_fake_bo_validate;
   bufmgr_fake->bufmgr.fence_validated = dri_fake_fence_validated;
   bufmgr_fake->bufmgr.fence_wait = dri_fake_fence_wait;
   bufmgr_fake->bufmgr.fence_reference = dri_fake_fence_reference;
   bufmgr_fake->bufmgr.fence_unreference = dri_fake_fence_unreference;

   bufmgr_fake->fence_emit = fence_emit;
   bufmgr_fake->fence_wait = fence_wait;
   bufmgr_fake->driver_priv = driver_priv;

   return &bufmgr_fake->bufmgr;
}
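
/* Usage sketch (illustrative only, not part of this file's API surface):
 * a driver supplies its own fence callbacks and aperture range.  The names
 * my_emit_fence, my_wait_fence, my_priv, aperture_offset, aperture_virtual
 * and aperture_size below are hypothetical placeholders.
 *
 *    dri_bufmgr *bufmgr;
 *    dri_bo *bo;
 *    dri_fence *fence;
 *
 *    bufmgr = dri_bufmgr_fake_init(aperture_offset, aperture_virtual,
 *                                  aperture_size,
 *                                  my_emit_fence, my_wait_fence, my_priv);
 *
 *    bo = bufmgr->bo_alloc(bufmgr, "vertices", 4096, 4, 0, 0);
 *    bufmgr->bo_map(bo, GL_TRUE);          // write access dirties the buffer
 *    // ... fill vertex data through bo->virtual ...
 *    bufmgr->bo_unmap(bo);
 *
 *    bufmgr->bo_validate(bo, 0);           // allocates card memory, uploads
 *    // ... emit commands using bo->offset ...
 *    fence = bufmgr->fence_validated(bufmgr, "batch", GL_TRUE);
 *
 *    bufmgr->fence_wait(fence);            // CPU-wait for the batch
 *    bufmgr->fence_unreference(fence);
 *    bufmgr->bo_unreference(bo);
 */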