1 /**************************************************************************
3 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
18 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
27 **************************************************************************/
29 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
30 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
36 #include "pipe/p_thread.h"
38 #include "ws_dri_bufmgr.h"
40 #include "pipe/p_debug.h"
41 #include "ws_dri_bufpool.h"
42 #include "ws_dri_fencemgr.h"
46 * This lock is here to protect drmBO structs changing underneath us during a
47 * validate list call, since validatelist cannot take individual locks for
48 * each drmBO. Validatelist takes this lock in write mode. Any access to an
49 * individual drmBO should take this lock in read mode, since in that case, the
50 * driBufferObject mutex will protect the access. Locking order is
51 * driBufferObject mutex -> this rw lock.
/* Global mutex guarding kernel drmBO access; see the locking comment above. */
54 pipe_static_mutex(bmMutex
);
/* Condition used by driWriteLockKernelBO() to wait until readers drain. */
55 pipe_static_condvar(bmCond
);
/* Count of threads currently holding the kernel-BO read lock. */
57 static int kernelReaders
= 0;
/* Bookkeeping counters for allocated buffer objects. */
58 static int num_buffers
= 0;
59 static int num_user_buffers
= 0;
61 static drmBO
*drmBOListBuf(void *iterator
)
64 drmMMListHead
*l
= (drmMMListHead
*) iterator
;
65 node
= DRMLISTENTRY(drmBONode
, l
, head
);
69 static void *drmBOListIterator(drmBOList
*list
)
71 void *ret
= list
->list
.next
;
73 if (ret
== &list
->list
)
78 static void *drmBOListNext(drmBOList
*list
, void *iterator
)
82 drmMMListHead
*l
= (drmMMListHead
*) iterator
;
84 if (ret
== &list
->list
)
89 static drmBONode
*drmAddListItem(drmBOList
*list
, drmBO
*item
,
97 if (l
== &list
->free
) {
98 node
= (drmBONode
*) malloc(sizeof(*node
));
106 node
= DRMLISTENTRY(drmBONode
, l
, head
);
111 DRMLISTADD(&node
->head
, &list
->list
);
116 static int drmAddValidateItem(drmBOList
*list
, drmBO
*buf
, uint64_t flags
,
117 uint64_t mask
, int *newItem
)
119 drmBONode
*node
, *cur
;
125 for (l
= list
->list
.next
; l
!= &list
->list
; l
= l
->next
) {
126 node
= DRMLISTENTRY(drmBONode
, l
, head
);
127 if (node
->buf
== buf
) {
133 cur
= drmAddListItem(list
, buf
, flags
, mask
);
142 uint64_t memFlags
= cur
->arg0
& flags
& DRM_BO_MASK_MEM
;
143 uint64_t accFlags
= (cur
->arg0
| flags
) & ~DRM_BO_MASK_MEM
;
145 if (mask
& cur
->arg1
& ~DRM_BO_MASK_MEM
& (cur
->arg0
^ flags
)) {
150 cur
->arg0
= (cur
->arg0
& ~mask
) | ((memFlags
| accFlags
) & mask
);
152 if (((cur
->arg1
& DRM_BO_MASK_MEM
) != 0) &&
153 (cur
->arg0
& DRM_BO_MASK_MEM
) == 0) {
160 static void drmBOFreeList(drmBOList
*list
)
166 while(l
!= &list
->list
) {
168 node
= DRMLISTENTRY(drmBONode
, l
, head
);
176 while(l
!= &list
->free
) {
178 node
= DRMLISTENTRY(drmBONode
, l
, head
);
185 static int drmAdjustListNodes(drmBOList
*list
)
191 while(list
->numCurrent
< list
->numTarget
) {
192 node
= (drmBONode
*) malloc(sizeof(*node
));
198 DRMLISTADD(&node
->head
, &list
->free
);
201 while(list
->numCurrent
> list
->numTarget
) {
203 if (l
== &list
->free
)
206 node
= DRMLISTENTRY(drmBONode
, l
, head
);
213 static int drmBOCreateList(int numTarget
, drmBOList
*list
)
215 DRMINITLISTHEAD(&list
->list
);
216 DRMINITLISTHEAD(&list
->free
);
217 list
->numTarget
= numTarget
;
218 list
->numCurrent
= 0;
220 return drmAdjustListNodes(list
);
223 static int drmBOResetList(drmBOList
*list
)
228 ret
= drmAdjustListNodes(list
);
233 while (l
!= &list
->list
) {
235 DRMLISTADD(l
, &list
->free
);
239 return drmAdjustListNodes(list
);
242 void driWriteLockKernelBO(void)
244 pipe_mutex_lock(bmMutex
);
245 while(kernelReaders
!= 0)
246 pipe_condvar_wait(bmCond
, bmMutex
);
249 void driWriteUnlockKernelBO(void)
251 pipe_mutex_unlock(bmMutex
);
254 void driReadLockKernelBO(void)
256 pipe_mutex_lock(bmMutex
);
258 pipe_mutex_unlock(bmMutex
);
261 void driReadUnlockKernelBO(void)
263 pipe_mutex_lock(bmMutex
);
264 if (--kernelReaders
== 0)
265 pipe_condvar_broadcast(bmCond
);
266 pipe_mutex_unlock(bmMutex
);
273 * TODO: Introduce fence pools in the same way as
274 * buffer object pools.
277 typedef struct _DriBufferObject
286 unsigned createdByReference
;
288 /* user-space buffer: */
294 typedef struct _DriBufferList
{
295 drmBOList drmBuffers
; /* List of kernel buffers needing validation */
296 drmBOList driBuffers
; /* List of user-space buffers needing validation */
/*
 * Fatal error path used through the BM_CKFATAL() macro: report the
 * negative errno-style code @val and the location it was detected at,
 * then abort the process.
 */
static void
bmError(int val, const char *file, const char *function, int line)
{
   printf("Fatal video memory manager error \"%s\".\n"
          "Check kernel logs or set the LIBGL_DEBUG\n"
          "environment variable to \"verbose\" for more info.\n"
          "Detected in file %s, line %d, function %s.\n",
          strerror(-val), file, line, function);
   abort();
}
316 driBOKernel(struct _DriBufferObject
*buf
)
320 driReadLockKernelBO();
321 pipe_mutex_lock(buf
->mutex
);
322 assert(buf
->private != NULL
);
323 ret
= buf
->pool
->kernel(buf
->pool
, buf
->private);
326 pipe_mutex_unlock(buf
->mutex
);
327 driReadUnlockKernelBO();
333 driBOWaitIdle(struct _DriBufferObject
*buf
, int lazy
)
337 * This function may block. Is it sane to keep the mutex held during
341 pipe_mutex_lock(buf
->mutex
);
342 BM_CKFATAL(buf
->pool
->waitIdle(buf
->pool
, buf
->private, &buf
->mutex
, lazy
));
343 pipe_mutex_unlock(buf
->mutex
);
347 driBOMap(struct _DriBufferObject
*buf
, unsigned flags
, unsigned hint
)
352 if (buf
->userBuffer
) {
353 return buf
->userData
;
356 pipe_mutex_lock(buf
->mutex
);
357 assert(buf
->private != NULL
);
358 retval
= buf
->pool
->map(buf
->pool
, buf
->private, flags
, hint
,
359 &buf
->mutex
, &virtual);
360 pipe_mutex_unlock(buf
->mutex
);
362 return retval
== 0 ? virtual : NULL
;
366 driBOUnmap(struct _DriBufferObject
*buf
)
371 assert(buf
->private != NULL
);
372 pipe_mutex_lock(buf
->mutex
);
373 BM_CKFATAL(buf
->pool
->unmap(buf
->pool
, buf
->private));
374 pipe_mutex_unlock(buf
->mutex
);
378 driBOOffset(struct _DriBufferObject
*buf
)
382 assert(buf
->private != NULL
);
384 pipe_mutex_lock(buf
->mutex
);
385 ret
= buf
->pool
->offset(buf
->pool
, buf
->private);
386 pipe_mutex_unlock(buf
->mutex
);
391 driBOPoolOffset(struct _DriBufferObject
*buf
)
395 assert(buf
->private != NULL
);
397 pipe_mutex_lock(buf
->mutex
);
398 ret
= buf
->pool
->poolOffset(buf
->pool
, buf
->private);
399 pipe_mutex_unlock(buf
->mutex
);
404 driBOFlags(struct _DriBufferObject
*buf
)
408 assert(buf
->private != NULL
);
410 driReadLockKernelBO();
411 pipe_mutex_lock(buf
->mutex
);
412 ret
= buf
->pool
->flags(buf
->pool
, buf
->private);
413 pipe_mutex_unlock(buf
->mutex
);
414 driReadUnlockKernelBO();
418 struct _DriBufferObject
*
419 driBOReference(struct _DriBufferObject
*buf
)
421 pipe_mutex_lock(buf
->mutex
);
422 if (++buf
->refCount
== 1) {
423 pipe_mutex_unlock(buf
->mutex
);
426 pipe_mutex_unlock(buf
->mutex
);
431 driBOUnReference(struct _DriBufferObject
*buf
)
438 pipe_mutex_lock(buf
->mutex
);
439 tmp
= --buf
->refCount
;
441 pipe_mutex_unlock(buf
->mutex
);
443 if (buf
->createdByReference
)
444 buf
->pool
->unreference(buf
->pool
, buf
->private);
446 buf
->pool
->destroy(buf
->pool
, buf
->private);
454 pipe_mutex_unlock(buf
->mutex
);
460 driBOData(struct _DriBufferObject
*buf
,
461 unsigned size
, const void *data
,
462 DriBufferPool
*newPool
,
465 void *virtual = NULL
;
468 struct _DriBufferPool
*pool
;
470 assert(!buf
->userBuffer
); /* XXX just do a memcpy? */
472 pipe_mutex_lock(buf
->mutex
);
475 if (pool
== NULL
&& newPool
!= NULL
) {
483 assert((size_t)"driBOData called on invalid buffer\n" & 0);
487 newBuffer
= (!buf
->private || pool
!= newPool
||
488 pool
->size(pool
, buf
->private) < size
);
495 if (buf
->createdByReference
) {
496 assert((size_t)"driBOData requiring resizing called on shared buffer.\n" & 0);
501 buf
->pool
->destroy(buf
->pool
, buf
->private);
505 buf
->private = pool
->create(pool
, size
, flags
, DRM_BO_HINT_DONT_FENCE
,
511 retval
= pool
->map(pool
, buf
->private,
513 DRM_BO_HINT_DONT_BLOCK
, &buf
->mutex
, &virtual);
514 } else if (pool
->map(pool
, buf
->private, DRM_BO_FLAG_WRITE
,
515 DRM_BO_HINT_DONT_BLOCK
, &buf
->mutex
, &virtual)) {
517 * Buffer is busy. need to create a new one.
522 newBuf
= pool
->create(pool
, size
, flags
, DRM_BO_HINT_DONT_FENCE
,
525 buf
->pool
->destroy(buf
->pool
, buf
->private);
526 buf
->private = newBuf
;
529 retval
= pool
->map(pool
, buf
->private,
530 DRM_BO_FLAG_WRITE
, 0, &buf
->mutex
, &virtual);
532 uint64_t flag_diff
= flags
^ buf
->flags
;
535 * We might need to change buffer flags.
539 assert(pool
->setStatus
!= NULL
);
540 BM_CKFATAL(pool
->unmap(pool
, buf
->private));
541 BM_CKFATAL(pool
->setStatus(pool
, buf
->private, flag_diff
,
546 retval
= pool
->map(pool
, buf
->private,
547 DRM_BO_FLAG_WRITE
, 0, &buf
->mutex
, &virtual);
553 memcpy(virtual, data
, size
);
555 BM_CKFATAL(pool
->unmap(pool
, buf
->private));
559 pipe_mutex_unlock(buf
->mutex
);
565 driBOSubData(struct _DriBufferObject
*buf
,
566 unsigned long offset
, unsigned long size
, const void *data
)
570 assert(!buf
->userBuffer
); /* XXX just do a memcpy? */
572 pipe_mutex_lock(buf
->mutex
);
574 BM_CKFATAL(buf
->pool
->map(buf
->pool
, buf
->private,
575 DRM_BO_FLAG_WRITE
, 0, &buf
->mutex
,
577 memcpy((unsigned char *) virtual + offset
, data
, size
);
578 BM_CKFATAL(buf
->pool
->unmap(buf
->pool
, buf
->private));
580 pipe_mutex_unlock(buf
->mutex
);
584 driBOGetSubData(struct _DriBufferObject
*buf
,
585 unsigned long offset
, unsigned long size
, void *data
)
589 assert(!buf
->userBuffer
); /* XXX just do a memcpy? */
591 pipe_mutex_lock(buf
->mutex
);
593 BM_CKFATAL(buf
->pool
->map(buf
->pool
, buf
->private,
594 DRM_BO_FLAG_READ
, 0, &buf
->mutex
, &virtual));
595 memcpy(data
, (unsigned char *) virtual + offset
, size
);
596 BM_CKFATAL(buf
->pool
->unmap(buf
->pool
, buf
->private));
598 pipe_mutex_unlock(buf
->mutex
);
602 driBOSetReferenced(struct _DriBufferObject
*buf
,
603 unsigned long handle
)
605 pipe_mutex_lock(buf
->mutex
);
606 if (buf
->private != NULL
) {
607 assert((size_t)"Invalid buffer for setReferenced\n" & 0);
611 if (buf
->pool
->reference
== NULL
) {
612 assert((size_t)"Invalid buffer pool for setReferenced\n" & 0);
615 buf
->private = buf
->pool
->reference(buf
->pool
, handle
);
617 assert((size_t)"Invalid buffer pool for setStatic\n" & 0);
620 buf
->createdByReference
= TRUE
;
621 buf
->flags
= buf
->pool
->kernel(buf
->pool
, buf
->private)->flags
;
622 pipe_mutex_unlock(buf
->mutex
);
626 driGenBuffers(struct _DriBufferPool
*pool
,
629 struct _DriBufferObject
*buffers
[],
630 unsigned alignment
, uint64_t flags
, unsigned hint
)
632 struct _DriBufferObject
*buf
;
635 flags
= (flags
) ? flags
: DRM_BO_FLAG_MEM_TT
| DRM_BO_FLAG_MEM_VRAM
|
636 DRM_BO_FLAG_MEM_LOCAL
| DRM_BO_FLAG_READ
| DRM_BO_FLAG_WRITE
;
642 for (i
= 0; i
< n
; ++i
) {
643 buf
= (struct _DriBufferObject
*) calloc(1, sizeof(*buf
));
647 pipe_mutex_init(buf
->mutex
);
648 pipe_mutex_lock(buf
->mutex
);
653 buf
->alignment
= alignment
;
655 buf
->createdByReference
= 0;
656 pipe_mutex_unlock(buf
->mutex
);
663 driGenUserBuffer(struct _DriBufferPool
*pool
,
665 struct _DriBufferObject
**buffers
,
666 void *ptr
, unsigned bytes
)
668 const unsigned alignment
= 1, flags
= 0, hint
= 0;
670 --num_buffers
; /* JB: is inced in GenBuffes */
671 driGenBuffers(pool
, name
, 1, buffers
, alignment
, flags
, hint
);
674 (*buffers
)->userBuffer
= 1;
675 (*buffers
)->userData
= ptr
;
676 (*buffers
)->userSize
= bytes
;
/*
 * Drop one reference on each of the @n buffers in @buffers.
 */
void
driDeleteBuffers(unsigned n, struct _DriBufferObject *buffers[])
{
   unsigned idx;

   for (idx = 0; idx < n; ++idx) {
      driBOUnReference(buffers[idx]);
   }
}
691 driInitBufMgr(int fd
)
697 * Note that lists are per-context and don't need mutex protection.
700 struct _DriBufferList
*
701 driBOCreateList(int target
)
703 struct _DriBufferList
*list
= calloc(sizeof(*list
), 1);
705 BM_CKFATAL(drmBOCreateList(target
, &list
->drmBuffers
));
706 BM_CKFATAL(drmBOCreateList(target
, &list
->driBuffers
));
711 driBOResetList(struct _DriBufferList
* list
)
714 ret
= drmBOResetList(&list
->drmBuffers
);
717 ret
= drmBOResetList(&list
->driBuffers
);
722 driBOFreeList(struct _DriBufferList
* list
)
724 drmBOFreeList(&list
->drmBuffers
);
725 drmBOFreeList(&list
->driBuffers
);
731 * Copied from libdrm, because it is needed by driAddValidateItem.
735 driAddListItem(drmBOList
* list
, drmBO
* item
,
736 uint64_t arg0
, uint64_t arg1
)
742 if (l
== &list
->free
) {
743 node
= (drmBONode
*) malloc(sizeof(*node
));
750 node
= DRMLISTENTRY(drmBONode
, l
, head
);
752 memset(&node
->bo_arg
, 0, sizeof(node
->bo_arg
));
756 DRMLISTADDTAIL(&node
->head
, &list
->list
);
762 * Slightly modified version compared to the libdrm version.
763 * This one returns the list index of the buffer put on the list.
767 driAddValidateItem(drmBOList
* list
, drmBO
* buf
, uint64_t flags
,
768 uint64_t mask
, int *itemLoc
,
769 struct _drmBONode
**pnode
)
771 drmBONode
*node
, *cur
;
777 for (l
= list
->list
.next
; l
!= &list
->list
; l
= l
->next
) {
778 node
= DRMLISTENTRY(drmBONode
, l
, head
);
779 if (node
->buf
== buf
) {
786 cur
= driAddListItem(list
, buf
, flags
, mask
);
793 uint64_t memFlags
= cur
->arg0
& flags
& DRM_BO_MASK_MEM
;
794 uint64_t accFlags
= (cur
->arg0
| flags
) & ~DRM_BO_MASK_MEM
;
796 if (mask
& cur
->arg1
& ~DRM_BO_MASK_MEM
& (cur
->arg0
^ flags
)) {
801 cur
->arg0
= (cur
->arg0
& ~mask
) | ((memFlags
| accFlags
) & mask
);
803 if (((cur
->arg1
& DRM_BO_MASK_MEM
) != 0) &&
804 (cur
->arg0
& DRM_BO_MASK_MEM
) == 0) {
815 driBOAddListItem(struct _DriBufferList
* list
, struct _DriBufferObject
*buf
,
816 uint64_t flags
, uint64_t mask
, int *itemLoc
,
817 struct _drmBONode
**node
)
821 pipe_mutex_lock(buf
->mutex
);
822 BM_CKFATAL(driAddValidateItem(&list
->drmBuffers
,
823 buf
->pool
->kernel(buf
->pool
, buf
->private),
824 flags
, mask
, itemLoc
, node
));
825 BM_CKFATAL(drmAddValidateItem(&list
->driBuffers
, (drmBO
*) buf
,
826 flags
, mask
, &newItem
));
830 pipe_mutex_unlock(buf
->mutex
);
833 drmBOList
*driGetdrmBOList(struct _DriBufferList
*list
)
835 driWriteLockKernelBO();
836 return &list
->drmBuffers
;
/*
 * Release the kernel-BO write lock taken by driGetdrmBOList().
 * The list argument is unused; it is kept for API symmetry.
 */
void
driPutdrmBOList(struct _DriBufferList *list)
{
   driWriteUnlockKernelBO();
}
846 driBOFence(struct _DriBufferObject
*buf
, struct _DriFenceObject
*fence
)
848 pipe_mutex_lock(buf
->mutex
);
849 if (buf
->pool
->fence
)
850 BM_CKFATAL(buf
->pool
->fence(buf
->pool
, buf
->private, fence
));
851 pipe_mutex_unlock(buf
->mutex
);
856 driBOUnrefUserList(struct _DriBufferList
*list
)
858 struct _DriBufferObject
*buf
;
861 curBuf
= drmBOListIterator(&list
->driBuffers
);
863 buf
= (struct _DriBufferObject
*)drmBOListBuf(curBuf
);
864 driBOUnReference(buf
);
865 curBuf
= drmBOListNext(&list
->driBuffers
, curBuf
);
869 struct _DriFenceObject
*
870 driBOFenceUserList(struct _DriFenceMgr
*mgr
,
871 struct _DriBufferList
*list
, const char *name
,
874 struct _DriFenceObject
*fence
;
875 struct _DriBufferObject
*buf
;
878 fence
= driFenceCreate(mgr
, kFence
->fence_class
, kFence
->type
,
879 kFence
, sizeof(*kFence
));
880 curBuf
= drmBOListIterator(&list
->driBuffers
);
883 * User-space fencing callbacks.
887 buf
= (struct _DriBufferObject
*) drmBOListBuf(curBuf
);
888 driBOFence(buf
, fence
);
889 driBOUnReference(buf
);
890 curBuf
= drmBOListNext(&list
->driBuffers
, curBuf
);
893 driBOResetList(list
);
898 driBOValidateUserList(struct _DriBufferList
* list
)
901 struct _DriBufferObject
*buf
;
903 curBuf
= drmBOListIterator(&list
->driBuffers
);
906 * User-space validation callbacks.
910 buf
= (struct _DriBufferObject
*) drmBOListBuf(curBuf
);
911 pipe_mutex_lock(buf
->mutex
);
912 if (buf
->pool
->validate
)
913 BM_CKFATAL(buf
->pool
->validate(buf
->pool
, buf
->private, &buf
->mutex
));
914 pipe_mutex_unlock(buf
->mutex
);
915 curBuf
= drmBOListNext(&list
->driBuffers
, curBuf
);
921 driPoolTakeDown(struct _DriBufferPool
*pool
)
923 pool
->takeDown(pool
);
928 driBOSize(struct _DriBufferObject
*buf
)
932 pipe_mutex_lock(buf
->mutex
);
933 size
= buf
->pool
->size(buf
->pool
, buf
->private);
934 pipe_mutex_unlock(buf
->mutex
);
940 drmBOList
*driBOGetDRMBuffers(struct _DriBufferList
*list
)
942 return &list
->drmBuffers
;
945 drmBOList
*driBOGetDRIBuffers(struct _DriBufferList
*list
)
947 return &list
->driBuffers
;