[965] Remove AUB file support.
[mesa.git] / src / mesa / drivers / dri / i965 / bufmgr_fake.c
1 /**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /* Originally a fake version of the buffer manager so that we can
29 * prototype the changes in a driver fairly quickly, has been fleshed
30 * out to a fully functional interim solution.
31 *
32 * Basically wraps the old style memory management in the new
33 * programming interface, but is more expressive and avoids many of
34 * the bugs in the old texture manager.
35 */
36 #include "bufmgr.h"
37
38 #include "intel_context.h"
39 #include "intel_ioctl.h"
40 #include "intel_batchbuffer.h"
41
42 #include "simple_list.h"
43 #include "mm.h"
44 #include "imports.h"
45
46 #define BM_POOL_MAX 8
47
48 /* Internal flags:
49 */
50 #define BM_NO_BACKING_STORE 0x2000
51 #define BM_NO_FENCE_SUBDATA 0x4000
52
53
54 static int check_fenced( struct intel_context *intel );
55
56 static int nr_attach = 0;
57
58 /* Wrapper around mm.c's mem_block, which understands that you must
59 * wait for fences to expire before memory can be freed. This is
60 * specific to our use of memcpy for uploads - an upload that was
61 * processed through the command queue wouldn't need to care about
62 * fences.
63 */
64 struct block {
65 struct block *next, *prev;
66 struct pool *pool; /* BM_MEM_AGP */
67 struct mem_block *mem; /* BM_MEM_AGP */
68
69 unsigned referenced:1;
70 unsigned on_hardware:1;
71 unsigned fenced:1;
72
73
74 unsigned fence; /* BM_MEM_AGP, Split to read_fence, write_fence */
75
76 struct buffer *buf;
77 void *virtual;
78 };
79
80
81 struct buffer {
82 unsigned id; /* debug only */
83 const char *name;
84 unsigned size;
85
86 unsigned mapped:1;
87 unsigned dirty:1;
88 unsigned alignment:13;
89 unsigned flags:16;
90
91 struct block *block;
92 void *backing_store;
93 void (*invalidate_cb)( struct intel_context *, void * );
94 void *invalidate_ptr;
95 };
96
97 struct pool {
98 unsigned size;
99 unsigned low_offset;
100 struct buffer *static_buffer;
101 unsigned flags;
102 struct mem_block *heap;
103 void *virtual;
104 struct block lru; /* only allocated, non-fence-pending blocks here */
105 };
106
107 struct bufmgr {
108 _glthread_Mutex mutex; /**< for thread safety */
109 struct pool pool[BM_POOL_MAX];
110 unsigned nr_pools;
111
112 unsigned buf_nr; /* for generating ids */
113
114 struct block referenced; /* after bmBufferOffset */
115 struct block on_hardware; /* after bmValidateBuffers */
116 struct block fenced; /* after bmFenceBuffers (mi_flush, emit irq, write dword) */
117 /* then to pool->lru or free() */
118
119 unsigned ctxId;
120 unsigned last_fence;
121 unsigned free_on_hardware;
122
123 unsigned fail:1;
124 unsigned need_fence:1;
125 };
126
127 #define MAXFENCE 0x7fffffff
128
129 static GLboolean FENCE_LTE( unsigned a, unsigned b )
130 {
131 if (a == b)
132 return GL_TRUE;
133
134 if (a < b && b - a < (1<<24))
135 return GL_TRUE;
136
137 if (a > b && MAXFENCE - a + b < (1<<24))
138 return GL_TRUE;
139
140 return GL_FALSE;
141 }
142
143 int bmTestFence( struct intel_context *intel, unsigned fence )
144 {
145 /* Slight problem with wrap-around:
146 */
147 return fence == 0 || FENCE_LTE(fence, intel->sarea->last_dispatch);
148 }
149
150 #define LOCK(bm) \
151 int dolock = nr_attach > 1; \
152 if (dolock) _glthread_LOCK_MUTEX(bm->mutex)
153
154 #define UNLOCK(bm) \
155 if (dolock) _glthread_UNLOCK_MUTEX(bm->mutex)
156
157
158
159 static GLboolean alloc_from_pool( struct intel_context *intel,
160 unsigned pool_nr,
161 struct buffer *buf )
162 {
163 struct bufmgr *bm = intel->bm;
164 struct pool *pool = &bm->pool[pool_nr];
165 struct block *block = (struct block *)calloc(sizeof *block, 1);
166 GLuint sz, align = (1<<buf->alignment);
167
168 if (!block)
169 return GL_FALSE;
170
171 sz = (buf->size + align-1) & ~(align-1);
172
173 block->mem = mmAllocMem(pool->heap,
174 sz,
175 buf->alignment, 0);
176 if (!block->mem) {
177 free(block);
178 return GL_FALSE;
179 }
180
181 make_empty_list(block);
182
183 /* Insert at head or at tail???
184 */
185 insert_at_tail(&pool->lru, block);
186
187 block->pool = pool;
188 block->virtual = pool->virtual + block->mem->ofs;
189 block->buf = buf;
190
191 buf->block = block;
192
193 return GL_TRUE;
194 }
195
196
197
198
199
200
201
202
203 /* Release the card storage associated with buf:
204 */
205 static void free_block( struct intel_context *intel, struct block *block )
206 {
207 DBG("free block %p\n", block);
208
209 if (!block)
210 return;
211
212 check_fenced(intel);
213
214 if (block->referenced) {
215 _mesa_printf("tried to free block on referenced list\n");
216 assert(0);
217 }
218 else if (block->on_hardware) {
219 block->buf = NULL;
220 intel->bm->free_on_hardware += block->mem->size;
221 }
222 else if (block->fenced) {
223 block->buf = NULL;
224 }
225 else {
226 DBG(" - free immediately\n");
227 remove_from_list(block);
228
229 mmFreeMem(block->mem);
230 free(block);
231 }
232 }
233
234
235 static void alloc_backing_store( struct intel_context *intel, struct buffer *buf )
236 {
237 assert(!buf->backing_store);
238 assert(!(buf->flags & (BM_NO_EVICT|BM_NO_BACKING_STORE)));
239
240 buf->backing_store = ALIGN_MALLOC(buf->size, 64);
241 }
242
243 static void free_backing_store( struct intel_context *intel, struct buffer *buf )
244 {
245 assert(!(buf->flags & (BM_NO_EVICT|BM_NO_BACKING_STORE)));
246
247 if (buf->backing_store) {
248 ALIGN_FREE(buf->backing_store);
249 buf->backing_store = NULL;
250 }
251 }
252
253
254
255
256
257
258 static void set_dirty( struct intel_context *intel,
259 struct buffer *buf )
260 {
261 if (buf->flags & BM_NO_BACKING_STORE)
262 buf->invalidate_cb(intel, buf->invalidate_ptr);
263
264 assert(!(buf->flags & BM_NO_EVICT));
265
266 DBG("set_dirty - buf %d\n", buf->id);
267 buf->dirty = 1;
268 }
269
270
271 static int evict_lru( struct intel_context *intel, GLuint max_fence, GLuint *pool )
272 {
273 struct bufmgr *bm = intel->bm;
274 struct block *block, *tmp;
275 int i;
276
277 DBG("%s\n", __FUNCTION__);
278
279 for (i = 0; i < bm->nr_pools; i++) {
280 if (!(bm->pool[i].flags & BM_NO_EVICT)) {
281 foreach_s(block, tmp, &bm->pool[i].lru) {
282
283 if (block->buf &&
284 (block->buf->flags & BM_NO_FENCE_SUBDATA))
285 continue;
286
287 if (block->fence && max_fence &&
288 !FENCE_LTE(block->fence, max_fence))
289 return 0;
290
291 set_dirty(intel, block->buf);
292 block->buf->block = NULL;
293
294 free_block(intel, block);
295 *pool = i;
296 return 1;
297 }
298 }
299 }
300
301
302 return 0;
303 }
304
305
306 #define foreach_s_rev(ptr, t, list) \
307 for(ptr=(list)->prev,t=(ptr)->prev; list != ptr; ptr=t, t=(t)->prev)
308
309 static int evict_mru( struct intel_context *intel, GLuint *pool )
310 {
311 struct bufmgr *bm = intel->bm;
312 struct block *block, *tmp;
313 int i;
314
315 DBG("%s\n", __FUNCTION__);
316
317 for (i = 0; i < bm->nr_pools; i++) {
318 if (!(bm->pool[i].flags & BM_NO_EVICT)) {
319 foreach_s_rev(block, tmp, &bm->pool[i].lru) {
320
321 if (block->buf &&
322 (block->buf->flags & BM_NO_FENCE_SUBDATA))
323 continue;
324
325 set_dirty(intel, block->buf);
326 block->buf->block = NULL;
327
328 free_block(intel, block);
329 *pool = i;
330 return 1;
331 }
332 }
333 }
334
335
336 return 0;
337 }
338
339
340 static int check_fenced( struct intel_context *intel )
341 {
342 struct bufmgr *bm = intel->bm;
343 struct block *block, *tmp;
344 int ret = 0;
345
346 foreach_s(block, tmp, &bm->fenced ) {
347 assert(block->fenced);
348
349 if (bmTestFence(intel, block->fence)) {
350
351 block->fenced = 0;
352
353 if (!block->buf) {
354 DBG("delayed free: offset %x sz %x\n", block->mem->ofs, block->mem->size);
355 remove_from_list(block);
356 mmFreeMem(block->mem);
357 free(block);
358 }
359 else {
360 DBG("return to lru: offset %x sz %x\n", block->mem->ofs, block->mem->size);
361 move_to_tail(&block->pool->lru, block);
362 }
363
364 ret = 1;
365 }
366 else {
367 /* Blocks are ordered by fence, so if one fails, all from
368 * here will fail also:
369 */
370 break;
371 }
372 }
373
374 /* Also check the referenced list:
375 */
376 foreach_s(block, tmp, &bm->referenced ) {
377 if (block->fenced &&
378 bmTestFence(intel, block->fence)) {
379 block->fenced = 0;
380 }
381 }
382
383
384 DBG("%s: %d\n", __FUNCTION__, ret);
385 return ret;
386 }
387
388
389
390 static void fence_blocks( struct intel_context *intel,
391 unsigned fence )
392 {
393 struct bufmgr *bm = intel->bm;
394 struct block *block, *tmp;
395
396 foreach_s (block, tmp, &bm->on_hardware) {
397 DBG("Fence block %p (sz 0x%x buf %p) with fence %d\n", block,
398 block->mem->size, block->buf, fence);
399 block->fence = fence;
400
401 block->on_hardware = 0;
402 block->fenced = 1;
403
404 /* Move to tail of pending list here
405 */
406 move_to_tail(&bm->fenced, block);
407 }
408
409 /* Also check the referenced list:
410 */
411 foreach_s (block, tmp, &bm->referenced) {
412 if (block->on_hardware) {
413 DBG("Fence block %p (sz 0x%x buf %p) with fence %d\n", block,
414 block->mem->size, block->buf, fence);
415
416 block->fence = fence;
417 block->on_hardware = 0;
418 block->fenced = 1;
419 }
420 }
421
422
423 bm->last_fence = fence;
424 assert(is_empty_list(&bm->on_hardware));
425 }
426
427
428
429
430 static GLboolean alloc_block( struct intel_context *intel,
431 struct buffer *buf )
432 {
433 struct bufmgr *bm = intel->bm;
434 int i;
435
436 assert(intel->locked);
437
438 DBG("%s 0x%x bytes (%s)\n", __FUNCTION__, buf->size, buf->name);
439
440 for (i = 0; i < bm->nr_pools; i++) {
441 if (!(bm->pool[i].flags & BM_NO_ALLOC) &&
442 alloc_from_pool(intel, i, buf)) {
443
444 DBG("%s --> 0x%x (sz %x)\n", __FUNCTION__,
445 buf->block->mem->ofs, buf->block->mem->size);
446
447 return GL_TRUE;
448 }
449 }
450
451 DBG("%s --> fail\n", __FUNCTION__);
452 return GL_FALSE;
453 }
454
455
456 static GLboolean evict_and_alloc_block( struct intel_context *intel,
457 struct buffer *buf )
458 {
459 GLuint pool;
460 struct bufmgr *bm = intel->bm;
461
462 assert(buf->block == NULL);
463
464 /* Put a cap on the amount of free memory we'll allow to accumulate
465 * before emitting a fence.
466 */
467 if (bm->free_on_hardware > 1 * 1024 * 1024) {
468 DBG("fence for free space: %x\n", bm->free_on_hardware);
469 bmSetFence(intel);
470 }
471
472 /* Search for already free memory:
473 */
474 if (alloc_block(intel, buf))
475 return GL_TRUE;
476
477 /* Look for memory that may have become free:
478 */
479 if (check_fenced(intel) &&
480 alloc_block(intel, buf))
481 return GL_TRUE;
482
483 /* Look for memory blocks not used for >1 frame:
484 */
485 while (evict_lru(intel, intel->second_last_swap_fence, &pool))
486 if (alloc_from_pool(intel, pool, buf))
487 return GL_TRUE;
488
489 /* If we're not thrashing, allow lru eviction to dig deeper into
490 * recently used textures. We'll probably be thrashing soon:
491 */
492 if (!intel->thrashing) {
493 while (evict_lru(intel, 0, &pool))
494 if (alloc_from_pool(intel, pool, buf))
495 return GL_TRUE;
496 }
497
498 /* Keep thrashing counter alive?
499 */
500 if (intel->thrashing)
501 intel->thrashing = 20;
502
503 /* Wait on any already pending fences - here we are waiting for any
504 * freed memory that has been submitted to hardware and fenced to
505 * become available:
506 */
507 while (!is_empty_list(&bm->fenced)) {
508 GLuint fence = bm->fenced.next->fence;
509 bmFinishFence(intel, fence);
510
511 if (alloc_block(intel, buf))
512 return GL_TRUE;
513 }
514
515
516 /*
517 */
518 if (!is_empty_list(&bm->on_hardware)) {
519 bmSetFence(intel);
520
521 while (!is_empty_list(&bm->fenced)) {
522 GLuint fence = bm->fenced.next->fence;
523 bmFinishFence(intel, fence);
524 }
525
526 if (!intel->thrashing) {
527 DBG("thrashing\n");
528 }
529 intel->thrashing = 20;
530
531 if (alloc_block(intel, buf))
532 return GL_TRUE;
533 }
534
535 while (evict_mru(intel, &pool))
536 if (alloc_from_pool(intel, pool, buf))
537 return GL_TRUE;
538
539 DBG("%s 0x%x bytes failed\n", __FUNCTION__, buf->size);
540
541 assert(is_empty_list(&bm->on_hardware));
542 assert(is_empty_list(&bm->fenced));
543
544 return GL_FALSE;
545 }
546
547
548
549
550
551
552
553
554
555
556 /***********************************************************************
557 * Public functions
558 */
559
560
561 /* The initialization functions are skewed in the fake implementation.
562 * This call would be to attach to an existing manager, rather than to
563 * create a local one.
564 */
565 struct bufmgr *bm_fake_intel_Attach( struct intel_context *intel )
566 {
567 _glthread_DECLARE_STATIC_MUTEX(initMutex);
568 static struct bufmgr bm;
569
570 /* This function needs a mutex of its own...
571 */
572 _glthread_LOCK_MUTEX(initMutex);
573
574 if (nr_attach == 0) {
575 _glthread_INIT_MUTEX(bm.mutex);
576
577 make_empty_list(&bm.referenced);
578 make_empty_list(&bm.fenced);
579 make_empty_list(&bm.on_hardware);
580
581 /* The context id of any of the share group. This won't be used
582 * in communication with the kernel, so it doesn't matter if
583 * this context is eventually deleted.
584 */
585 bm.ctxId = intel->hHWContext;
586 }
587
588 nr_attach++;
589
590 _glthread_UNLOCK_MUTEX(initMutex);
591
592 return &bm;
593 }
594
595
596
597 /* The virtual pointer would go away in a true implementation.
598 */
599 int bmInitPool( struct intel_context *intel,
600 unsigned long low_offset,
601 void *low_virtual,
602 unsigned long size,
603 unsigned flags)
604 {
605 struct bufmgr *bm = intel->bm;
606 int retval = 0;
607
608 LOCK(bm);
609 {
610 GLuint i;
611
612 for (i = 0; i < bm->nr_pools; i++) {
613 if (bm->pool[i].low_offset == low_offset &&
614 bm->pool[i].size == size) {
615 retval = i;
616 goto out;
617 }
618 }
619
620
621 if (bm->nr_pools >= BM_POOL_MAX)
622 retval = -1;
623 else {
624 i = bm->nr_pools++;
625
626 DBG("bmInitPool %d low_offset %x sz %x\n",
627 i, low_offset, size);
628
629 bm->pool[i].low_offset = low_offset;
630 bm->pool[i].size = size;
631 bm->pool[i].heap = mmInit( low_offset, size );
632 bm->pool[i].virtual = low_virtual - low_offset;
633 bm->pool[i].flags = flags;
634
635 make_empty_list(&bm->pool[i].lru);
636
637 retval = i;
638 }
639 }
640 out:
641 UNLOCK(bm);
642 return retval;
643 }
644
645 static struct buffer *do_GenBuffer(struct intel_context *intel, const char *name, int align)
646 {
647 struct bufmgr *bm = intel->bm;
648 struct buffer *buf = calloc(sizeof(*buf), 1);
649
650 buf->id = ++bm->buf_nr;
651 buf->name = name;
652 buf->alignment = align;
653 buf->flags = BM_MEM_AGP|BM_MEM_VRAM|BM_MEM_LOCAL;
654
655 return buf;
656 }
657
658
659 void *bmFindVirtual( struct intel_context *intel,
660 unsigned int offset,
661 size_t sz )
662 {
663 struct bufmgr *bm = intel->bm;
664 int i;
665
666 for (i = 0; i < bm->nr_pools; i++)
667 if (offset >= bm->pool[i].low_offset &&
668 offset + sz <= bm->pool[i].low_offset + bm->pool[i].size)
669 return bm->pool[i].virtual + offset;
670
671 return NULL;
672 }
673
674
675 void bmGenBuffers(struct intel_context *intel,
676 const char *name, unsigned n,
677 struct buffer **buffers,
678 int align )
679 {
680 struct bufmgr *bm = intel->bm;
681 LOCK(bm);
682 {
683 int i;
684
685 for (i = 0; i < n; i++)
686 buffers[i] = do_GenBuffer(intel, name, align);
687 }
688 UNLOCK(bm);
689 }
690
691
692 void bmDeleteBuffers(struct intel_context *intel, unsigned n, struct buffer **buffers)
693 {
694 struct bufmgr *bm = intel->bm;
695
696 LOCK(bm);
697 {
698 unsigned i;
699
700 for (i = 0; i < n; i++) {
701 struct buffer *buf = buffers[i];
702
703 if (buf && buf->block)
704 free_block(intel, buf->block);
705
706 if (buf)
707 free(buf);
708 }
709 }
710 UNLOCK(bm);
711 }
712
713
714
715
716 /* Hook to inform faked buffer manager about fixed-position
717 * front,depth,back buffers. These may move to a fully memory-managed
718 * scheme, or they may continue to be managed as is. It will probably
719 * be useful to pass a fixed offset here one day.
720 */
721 struct buffer *bmGenBufferStatic(struct intel_context *intel,
722 unsigned pool )
723 {
724 struct bufmgr *bm = intel->bm;
725 struct buffer *buf;
726 LOCK(bm);
727 {
728 assert(bm->pool[pool].flags & BM_NO_EVICT);
729 assert(bm->pool[pool].flags & BM_NO_MOVE);
730
731 if (bm->pool[pool].static_buffer)
732 buf = bm->pool[pool].static_buffer;
733 else {
734 buf = do_GenBuffer(intel, "static", 12);
735
736 bm->pool[pool].static_buffer = buf;
737 assert(!buf->block);
738
739 buf->size = bm->pool[pool].size;
740 buf->flags = bm->pool[pool].flags;
741 buf->alignment = 12;
742
743 if (!alloc_from_pool(intel, pool, buf))
744 assert(0);
745 }
746 }
747 UNLOCK(bm);
748 return buf;
749 }
750
751
752 static void wait_quiescent(struct intel_context *intel,
753 struct block *block)
754 {
755 if (block->on_hardware) {
756 assert(intel->bm->need_fence);
757 bmSetFence(intel);
758 assert(!block->on_hardware);
759 }
760
761
762 if (block->fenced) {
763 bmFinishFence(intel, block->fence);
764 }
765
766 assert(!block->on_hardware);
767 assert(!block->fenced);
768 }
769
770
771
772 /* If buffer size changes, free and reallocate. Otherwise update in
773 * place.
774 */
775 int bmBufferData(struct intel_context *intel,
776 struct buffer *buf,
777 unsigned size,
778 const void *data,
779 unsigned flags )
780 {
781 struct bufmgr *bm = intel->bm;
782 int retval = 0;
783
784 LOCK(bm);
785 {
786 DBG("bmBufferData %d sz 0x%x data: %p\n", buf->id, size, data);
787
788 assert(!buf->mapped);
789
790 if (buf->block) {
791 struct block *block = buf->block;
792
793 /* Optimistic check to see if we can reuse the block -- not
794 * required for correctness:
795 */
796 if (block->fenced)
797 check_fenced(intel);
798
799 if (block->on_hardware ||
800 block->fenced ||
801 (buf->size && buf->size != size) ||
802 (data == NULL)) {
803
804 assert(!block->referenced);
805
806 free_block(intel, block);
807 buf->block = NULL;
808 buf->dirty = 1;
809 }
810 }
811
812 buf->size = size;
813 if (buf->block) {
814 assert (buf->block->mem->size >= size);
815 }
816
817 if (buf->flags & (BM_NO_BACKING_STORE|BM_NO_EVICT)) {
818
819 assert(intel->locked || data == NULL);
820
821 if (data != NULL) {
822 if (!buf->block && !evict_and_alloc_block(intel, buf)) {
823 bm->fail = 1;
824 retval = -1;
825 goto out;
826 }
827
828 wait_quiescent(intel, buf->block);
829
830 DBG("bmBufferData %d offset 0x%x sz 0x%x\n",
831 buf->id, buf->block->mem->ofs, size);
832
833 assert(buf->block->virtual == buf->block->pool->virtual + buf->block->mem->ofs);
834
835 do_memcpy(buf->block->virtual, data, size);
836 }
837 buf->dirty = 0;
838 }
839 else {
840 DBG("%s - set buf %d dirty\n", __FUNCTION__, buf->id);
841 set_dirty(intel, buf);
842 free_backing_store(intel, buf);
843
844 if (data != NULL) {
845 alloc_backing_store(intel, buf);
846 do_memcpy(buf->backing_store, data, size);
847 }
848 }
849 }
850 out:
851 UNLOCK(bm);
852 return retval;
853 }
854
855
856 /* Update the buffer in place, in whatever space it is currently resident:
857 */
858 int bmBufferSubData(struct intel_context *intel,
859 struct buffer *buf,
860 unsigned offset,
861 unsigned size,
862 const void *data )
863 {
864 struct bufmgr *bm = intel->bm;
865 int retval = 0;
866
867 if (size == 0)
868 return 0;
869
870 LOCK(bm);
871 {
872 DBG("bmBufferSubdata %d offset 0x%x sz 0x%x\n", buf->id, offset, size);
873
874 assert(offset+size <= buf->size);
875
876 if (buf->flags & (BM_NO_EVICT|BM_NO_BACKING_STORE)) {
877
878 assert(intel->locked);
879
880 if (!buf->block && !evict_and_alloc_block(intel, buf)) {
881 bm->fail = 1;
882 retval = -1;
883 goto out;
884 }
885
886 if (!(buf->flags & BM_NO_FENCE_SUBDATA))
887 wait_quiescent(intel, buf->block);
888
889 buf->dirty = 0;
890
891 do_memcpy(buf->block->virtual + offset, data, size);
892 }
893 else {
894 DBG("%s - set buf %d dirty\n", __FUNCTION__, buf->id);
895 set_dirty(intel, buf);
896
897 if (buf->backing_store == NULL)
898 alloc_backing_store(intel, buf);
899
900 do_memcpy(buf->backing_store + offset, data, size);
901 }
902 }
903 out:
904 UNLOCK(bm);
905 return retval;
906 }
907
908 unsigned bmBufferOffset(struct intel_context *intel,
909 struct buffer *buf)
910 {
911 struct bufmgr *bm = intel->bm;
912 unsigned retval = 0;
913
914 LOCK(bm);
915 {
916 assert(intel->locked);
917
918 if (!buf->block &&
919 !evict_and_alloc_block(intel, buf)) {
920 bm->fail = 1;
921 retval = ~0;
922 }
923 else {
924 assert(buf->block);
925 assert(buf->block->buf == buf);
926
927 DBG("Add buf %d (block %p, dirty %d) to referenced list\n", buf->id, buf->block,
928 buf->dirty);
929
930 move_to_tail(&bm->referenced, buf->block);
931 buf->block->referenced = 1;
932
933 retval = buf->block->mem->ofs;
934 }
935 }
936 UNLOCK(bm);
937
938 return retval;
939 }
940
941
942
943 /* Extract data from the buffer:
944 */
945 void bmBufferGetSubData(struct intel_context *intel,
946 struct buffer *buf,
947 unsigned offset,
948 unsigned size,
949 void *data )
950 {
951 struct bufmgr *bm = intel->bm;
952
953 LOCK(bm);
954 {
955 DBG("bmBufferSubdata %d offset 0x%x sz 0x%x\n", buf->id, offset, size);
956
957 if (buf->flags & (BM_NO_EVICT|BM_NO_BACKING_STORE)) {
958 if (buf->block && size) {
959 wait_quiescent(intel, buf->block);
960 do_memcpy(data, buf->block->virtual + offset, size);
961 }
962 }
963 else {
964 if (buf->backing_store && size) {
965 do_memcpy(data, buf->backing_store + offset, size);
966 }
967 }
968 }
969 UNLOCK(bm);
970 }
971
972
973 /* Return a pointer to whatever space the buffer is currently resident in:
974 */
975 void *bmMapBuffer( struct intel_context *intel,
976 struct buffer *buf,
977 unsigned flags )
978 {
979 struct bufmgr *bm = intel->bm;
980 void *retval = NULL;
981
982 LOCK(bm);
983 {
984 DBG("bmMapBuffer %d\n", buf->id);
985
986 if (buf->mapped) {
987 _mesa_printf("%s: already mapped\n", __FUNCTION__);
988 retval = NULL;
989 }
990 else if (buf->flags & (BM_NO_BACKING_STORE|BM_NO_EVICT)) {
991
992 assert(intel->locked);
993
994 if (!buf->block && !evict_and_alloc_block(intel, buf)) {
995 DBG("%s: alloc failed\n", __FUNCTION__);
996 bm->fail = 1;
997 retval = NULL;
998 }
999 else {
1000 assert(buf->block);
1001 buf->dirty = 0;
1002
1003 if (!(buf->flags & BM_NO_FENCE_SUBDATA))
1004 wait_quiescent(intel, buf->block);
1005
1006 buf->mapped = 1;
1007 retval = buf->block->virtual;
1008 }
1009 }
1010 else {
1011 DBG("%s - set buf %d dirty\n", __FUNCTION__, buf->id);
1012 set_dirty(intel, buf);
1013
1014 if (buf->backing_store == 0)
1015 alloc_backing_store(intel, buf);
1016
1017 buf->mapped = 1;
1018 retval = buf->backing_store;
1019 }
1020 }
1021 UNLOCK(bm);
1022 return retval;
1023 }
1024
1025 void bmUnmapBuffer( struct intel_context *intel, struct buffer *buf )
1026 {
1027 struct bufmgr *bm = intel->bm;
1028
1029 LOCK(bm);
1030 {
1031 DBG("bmUnmapBuffer %d\n", buf->id);
1032 buf->mapped = 0;
1033 }
1034 UNLOCK(bm);
1035 }
1036
1037
1038
1039
1040 /* This is the big hack that turns on BM_NO_BACKING_STORE. Basically
1041 * says that an external party will maintain the backing store, eg
1042 * Mesa's local copy of texture data.
1043 */
1044 void bmBufferSetInvalidateCB(struct intel_context *intel,
1045 struct buffer *buf,
1046 void (*invalidate_cb)( struct intel_context *, void *ptr ),
1047 void *ptr,
1048 GLboolean dont_fence_subdata)
1049 {
1050 struct bufmgr *bm = intel->bm;
1051
1052 LOCK(bm);
1053 {
1054 if (buf->backing_store)
1055 free_backing_store(intel, buf);
1056
1057 buf->flags |= BM_NO_BACKING_STORE;
1058
1059 if (dont_fence_subdata)
1060 buf->flags |= BM_NO_FENCE_SUBDATA;
1061
1062 DBG("bmBufferSetInvalidateCB set buf %d dirty\n", buf->id);
1063 buf->dirty = 1;
1064 buf->invalidate_cb = invalidate_cb;
1065 buf->invalidate_ptr = ptr;
1066
1067 /* Note that it is invalid right from the start. Also note
1068 * invalidate_cb is called with the bufmgr locked, so cannot
1069 * itself make bufmgr calls.
1070 */
1071 invalidate_cb( intel, ptr );
1072 }
1073 UNLOCK(bm);
1074 }
1075
1076
1077
1078
1079
1080
1081
1082 /* This is only protected against thread interactions by the DRI lock
1083 * and the policy of ensuring that all dma is flushed prior to
1084 * releasing that lock. Otherwise you might have two threads building
1085 * up a list of buffers to validate at once.
1086 */
1087 int bmValidateBuffers( struct intel_context *intel )
1088 {
1089 struct bufmgr *bm = intel->bm;
1090 int retval = 0;
1091
1092 LOCK(bm);
1093 {
1094 DBG("%s fail %d\n", __FUNCTION__, bm->fail);
1095 assert(intel->locked);
1096
1097 if (!bm->fail) {
1098 struct block *block, *tmp;
1099
1100 foreach_s(block, tmp, &bm->referenced) {
1101 struct buffer *buf = block->buf;
1102
1103 DBG("Validate buf %d / block %p / dirty %d\n", buf->id, block, buf->dirty);
1104
1105 /* Upload the buffer contents if necessary:
1106 */
1107 if (buf->dirty) {
1108 DBG("Upload dirty buf %d (%s) sz %d offset 0x%x\n", buf->id,
1109 buf->name, buf->size, block->mem->ofs);
1110
1111 assert(!(buf->flags & (BM_NO_BACKING_STORE|BM_NO_EVICT)));
1112
1113 wait_quiescent(intel, buf->block);
1114
1115 do_memcpy(buf->block->virtual,
1116 buf->backing_store,
1117 buf->size);
1118
1119 buf->dirty = 0;
1120 }
1121
1122 block->referenced = 0;
1123 block->on_hardware = 1;
1124 move_to_tail(&bm->on_hardware, block);
1125 }
1126
1127 bm->need_fence = 1;
1128 }
1129
1130 retval = bm->fail ? -1 : 0;
1131 }
1132 UNLOCK(bm);
1133
1134
1135 if (retval != 0)
1136 DBG("%s failed\n", __FUNCTION__);
1137
1138 return retval;
1139 }
1140
1141
1142
1143
1144 void bmReleaseBuffers( struct intel_context *intel )
1145 {
1146 struct bufmgr *bm = intel->bm;
1147
1148 LOCK(bm);
1149 {
1150 struct block *block, *tmp;
1151
1152 foreach_s (block, tmp, &bm->referenced) {
1153
1154 DBG("remove block %p from referenced list\n", block);
1155
1156 if (block->on_hardware) {
1157 /* Return to the on-hardware list.
1158 */
1159 move_to_tail(&bm->on_hardware, block);
1160 }
1161 else if (block->fenced) {
1162 struct block *s;
1163
1164 /* Hmm - have to scan the fenced list to insert the
1165 * buffers in order. This is O(nm), but rare and the
1166 * numbers are low.
1167 */
1168 foreach (s, &bm->fenced) {
1169 if (FENCE_LTE(block->fence, s->fence))
1170 break;
1171 }
1172
1173 move_to_tail(s, block);
1174 }
1175 else {
1176 /* Return to the lru list:
1177 */
1178 move_to_tail(&block->pool->lru, block);
1179 }
1180
1181 block->referenced = 0;
1182 }
1183 }
1184 UNLOCK(bm);
1185 }
1186
1187
1188 /* This functionality is used by the buffer manager, not really sure
1189 * if we need to be exposing it in this way, probably libdrm will
1190 * offer equivalent calls.
1191 *
1192 * For now they can stay, but will likely change/move before final:
1193 */
1194 unsigned bmSetFence( struct intel_context *intel )
1195 {
1196 assert(intel->locked);
1197
1198 /* Emit MI_FLUSH here:
1199 */
1200 if (intel->bm->need_fence) {
1201
1202 /* Emit a flush without using a batchbuffer. Can't rely on the
1203 * batchbuffer at this level really. Would really prefer that
1204 * the IRQ ioctly emitted the flush at the same time.
1205 */
1206 GLuint dword[2];
1207 dword[0] = intel->vtbl.flush_cmd();
1208 dword[1] = 0;
1209 intel_cmd_ioctl(intel, (char *)&dword, sizeof(dword));
1210
1211 intel->bm->last_fence = intelEmitIrqLocked( intel );
1212
1213 fence_blocks(intel, intel->bm->last_fence);
1214
1215 intel->vtbl.note_fence(intel, intel->bm->last_fence);
1216 intel->bm->need_fence = 0;
1217
1218 if (intel->thrashing) {
1219 intel->thrashing--;
1220 if (!intel->thrashing)
1221 DBG("not thrashing\n");
1222 }
1223
1224 intel->bm->free_on_hardware = 0;
1225 }
1226
1227 return intel->bm->last_fence;
1228 }
1229
1230 unsigned bmSetFenceLock( struct intel_context *intel )
1231 {
1232 unsigned last;
1233 LOCK(intel->bm);
1234 last = bmSetFence(intel);
1235 UNLOCK(intel->bm);
1236 return last;
1237 }
1238 unsigned bmLockAndFence( struct intel_context *intel )
1239 {
1240 if (intel->bm->need_fence) {
1241 LOCK_HARDWARE(intel);
1242 LOCK(intel->bm);
1243 bmSetFence(intel);
1244 UNLOCK(intel->bm);
1245 UNLOCK_HARDWARE(intel);
1246 }
1247
1248 return intel->bm->last_fence;
1249 }
1250
1251
1252 void bmFinishFence( struct intel_context *intel, unsigned fence )
1253 {
1254 if (!bmTestFence(intel, fence)) {
1255 DBG("...wait on fence %d\n", fence);
1256 intelWaitIrq( intel, fence );
1257 }
1258 assert(bmTestFence(intel, fence));
1259 check_fenced(intel);
1260 }
1261
1262 void bmFinishFenceLock( struct intel_context *intel, unsigned fence )
1263 {
1264 LOCK(intel->bm);
1265 bmFinishFence(intel, fence);
1266 UNLOCK(intel->bm);
1267 }
1268
1269
1270 /* Specifically ignore texture memory sharing.
1271 * -- just evict everything
1272 * -- and wait for idle
1273 */
1274 void bm_fake_NotifyContendedLockTake( struct intel_context *intel )
1275 {
1276 struct bufmgr *bm = intel->bm;
1277
1278 LOCK(bm);
1279 {
1280 struct block *block, *tmp;
1281 GLuint i;
1282
1283 assert(is_empty_list(&bm->referenced));
1284
1285 bm->need_fence = 1;
1286 bm->fail = 0;
1287 bmFinishFence(intel, bmSetFence(intel));
1288
1289 assert(is_empty_list(&bm->fenced));
1290 assert(is_empty_list(&bm->on_hardware));
1291
1292 for (i = 0; i < bm->nr_pools; i++) {
1293 if (!(bm->pool[i].flags & BM_NO_EVICT)) {
1294 foreach_s(block, tmp, &bm->pool[i].lru) {
1295 assert(bmTestFence(intel, block->fence));
1296 set_dirty(intel, block->buf);
1297 }
1298 }
1299 }
1300 }
1301 UNLOCK(bm);
1302 }
1303
1304
1305
1306 void bmEvictAll( struct intel_context *intel )
1307 {
1308 struct bufmgr *bm = intel->bm;
1309
1310 LOCK(bm);
1311 {
1312 struct block *block, *tmp;
1313 GLuint i;
1314
1315 DBG("%s\n", __FUNCTION__);
1316
1317 assert(is_empty_list(&bm->referenced));
1318
1319 bm->need_fence = 1;
1320 bm->fail = 0;
1321 bmFinishFence(intel, bmSetFence(intel));
1322
1323 assert(is_empty_list(&bm->fenced));
1324 assert(is_empty_list(&bm->on_hardware));
1325
1326 for (i = 0; i < bm->nr_pools; i++) {
1327 if (!(bm->pool[i].flags & BM_NO_EVICT)) {
1328 foreach_s(block, tmp, &bm->pool[i].lru) {
1329 assert(bmTestFence(intel, block->fence));
1330 set_dirty(intel, block->buf);
1331 block->buf->block = NULL;
1332
1333 free_block(intel, block);
1334 }
1335 }
1336 }
1337 }
1338 UNLOCK(bm);
1339 }
1340
1341
1342 GLboolean bmError( struct intel_context *intel )
1343 {
1344 struct bufmgr *bm = intel->bm;
1345 GLboolean retval;
1346
1347 LOCK(bm);
1348 {
1349 retval = bm->fail;
1350 }
1351 UNLOCK(bm);
1352
1353 return retval;
1354 }
1355
1356
1357 GLuint bmCtxId( struct intel_context *intel )
1358 {
1359 return intel->bm->ctxId;
1360 }