Merge commit 'origin/gallium-master-merge'
[mesa.git] / src / gallium / winsys / drm / intel / common / ws_dri_bufmgr.c
1 /**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
18 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
24 * of the Software.
25 *
26 *
27 **************************************************************************/
28 /*
29 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
30 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
31 */
32
33 #include <xf86drm.h>
34 #include <stdlib.h>
35 #include <stdio.h>
36 #include "pipe/p_thread.h"
37 #include "errno.h"
38 #include "ws_dri_bufmgr.h"
39 #include "string.h"
40 #include "pipe/p_debug.h"
41 #include "ws_dri_bufpool.h"
42 #include "ws_dri_fencemgr.h"
43
44
45 /*
46 * This lock is here to protect drmBO structs changing underneath us during a
47 * validate list call, since validatelist cannot take individiual locks for
48 * each drmBO. Validatelist takes this lock in write mode. Any access to an
49 * individual drmBO should take this lock in read mode, since in that case, the
50 * driBufferObject mutex will protect the access. Locking order is
51 * driBufferObject mutex - > this rw lock.
52 */
53
pipe_static_mutex(bmMutex);      /* guards kernelReaders; held for the whole write-lock span */
pipe_static_condvar(bmCond);     /* signalled when the last reader drops the read lock */

static int kernelReaders = 0;    /* number of threads currently holding the read lock */
static int num_buffers = 0;      /* statistics: live (non-user) buffer objects */
static int num_user_buffers = 0; /* statistics: live user-memory buffer objects */
60
61 static drmBO *drmBOListBuf(void *iterator)
62 {
63 drmBONode *node;
64 drmMMListHead *l = (drmMMListHead *) iterator;
65 node = DRMLISTENTRY(drmBONode, l, head);
66 return node->buf;
67 }
68
69 static void *drmBOListIterator(drmBOList *list)
70 {
71 void *ret = list->list.next;
72
73 if (ret == &list->list)
74 return NULL;
75 return ret;
76 }
77
78 static void *drmBOListNext(drmBOList *list, void *iterator)
79 {
80 void *ret;
81
82 drmMMListHead *l = (drmMMListHead *) iterator;
83 ret = l->next;
84 if (ret == &list->list)
85 return NULL;
86 return ret;
87 }
88
/*
 * Append a buffer to a drmBOList, recycling a node from the list's free
 * pool when one is available and malloc'ing a new one otherwise.
 * Returns the node, or NULL on allocation failure.
 *
 * NOTE(review): this variant inserts with DRMLISTADD (head insertion),
 * whereas driAddListItem() below uses DRMLISTADDTAIL — presumably
 * intentional since this list's order is not used for indexing; confirm.
 */
static drmBONode *drmAddListItem(drmBOList *list, drmBO *item,
                                 uint64_t arg0,
                                 uint64_t arg1)
{
   drmBONode *node;
   drmMMListHead *l;

   l = list->free.next;
   if (l == &list->free) {
      /* Free pool exhausted: allocate a fresh node. */
      node = (drmBONode *) malloc(sizeof(*node));
      if (!node) {
         return NULL;
      }
      list->numCurrent++;
   }
   else {
      /* Recycle a node from the free pool. */
      DRMLISTDEL(l);
      node = DRMLISTENTRY(drmBONode, l, head);
   }
   node->buf = item;
   node->arg0 = arg0;
   node->arg1 = arg1;
   DRMLISTADD(&node->head, &list->list);
   list->numOnList++;
   return node;
}
115
/*
 * Add a buffer to a validation list, or — if it is already on the list —
 * merge the new placement flags/mask into the existing entry.
 * *newItem is set to 1 only when the buffer was not previously listed.
 * Returns 0, -ENOMEM on allocation failure, or -EINVAL on conflicting
 * flag requests.
 */
static int drmAddValidateItem(drmBOList *list, drmBO *buf, uint64_t flags,
                              uint64_t mask, int *newItem)
{
   drmBONode *node, *cur;
   drmMMListHead *l;

   *newItem = 0;
   cur = NULL;

   /* Linear scan for an existing entry for this buffer. */
   for (l = list->list.next; l != &list->list; l = l->next) {
      node = DRMLISTENTRY(drmBONode, l, head);
      if (node->buf == buf) {
         cur = node;
         break;
      }
   }
   if (!cur) {
      cur = drmAddListItem(list, buf, flags, mask);
      if (!cur) {
         return -ENOMEM;
      }
      *newItem = 1;
      cur->arg0 = flags;
      cur->arg1 = mask;
   }
   else {
      /* Merge rules: memory-placement flags are intersected (the buffer
       * must satisfy every requested placement), access flags are
       * unioned. */
      uint64_t memFlags = cur->arg0 & flags & DRM_BO_MASK_MEM;
      uint64_t accFlags = (cur->arg0 | flags) & ~DRM_BO_MASK_MEM;

      /* Reject if both requests pin the same non-memory bit to
       * different values. */
      if (mask & cur->arg1 & ~DRM_BO_MASK_MEM & (cur->arg0 ^ flags)) {
         return -EINVAL;
      }

      cur->arg1 |= mask;
      cur->arg0 = (cur->arg0 & ~mask) | ((memFlags | accFlags) & mask);

      /* A memory mask with an empty placement intersection is
       * unsatisfiable. */
      if (((cur->arg1 & DRM_BO_MASK_MEM) != 0) &&
          (cur->arg0 & DRM_BO_MASK_MEM) == 0) {
         return -EINVAL;
      }
   }
   return 0;
}
159
160 static void drmBOFreeList(drmBOList *list)
161 {
162 drmBONode *node;
163 drmMMListHead *l;
164
165 l = list->list.next;
166 while(l != &list->list) {
167 DRMLISTDEL(l);
168 node = DRMLISTENTRY(drmBONode, l, head);
169 free(node);
170 l = list->list.next;
171 list->numCurrent--;
172 list->numOnList--;
173 }
174
175 l = list->free.next;
176 while(l != &list->free) {
177 DRMLISTDEL(l);
178 node = DRMLISTENTRY(drmBONode, l, head);
179 free(node);
180 l = list->free.next;
181 list->numCurrent--;
182 }
183 }
184
185 static int drmAdjustListNodes(drmBOList *list)
186 {
187 drmBONode *node;
188 drmMMListHead *l;
189 int ret = 0;
190
191 while(list->numCurrent < list->numTarget) {
192 node = (drmBONode *) malloc(sizeof(*node));
193 if (!node) {
194 ret = -ENOMEM;
195 break;
196 }
197 list->numCurrent++;
198 DRMLISTADD(&node->head, &list->free);
199 }
200
201 while(list->numCurrent > list->numTarget) {
202 l = list->free.next;
203 if (l == &list->free)
204 break;
205 DRMLISTDEL(l);
206 node = DRMLISTENTRY(drmBONode, l, head);
207 free(node);
208 list->numCurrent--;
209 }
210 return ret;
211 }
212
213 static int drmBOCreateList(int numTarget, drmBOList *list)
214 {
215 DRMINITLISTHEAD(&list->list);
216 DRMINITLISTHEAD(&list->free);
217 list->numTarget = numTarget;
218 list->numCurrent = 0;
219 list->numOnList = 0;
220 return drmAdjustListNodes(list);
221 }
222
223 static int drmBOResetList(drmBOList *list)
224 {
225 drmMMListHead *l;
226 int ret;
227
228 ret = drmAdjustListNodes(list);
229 if (ret)
230 return ret;
231
232 l = list->list.next;
233 while (l != &list->list) {
234 DRMLISTDEL(l);
235 DRMLISTADD(l, &list->free);
236 list->numOnList--;
237 l = list->list.next;
238 }
239 return drmAdjustListNodes(list);
240 }
241
/*
 * Take the kernel-BO lock in write mode: acquire bmMutex, then wait for
 * all readers to drain.  NOTE: bmMutex is deliberately left held on
 * return — driWriteUnlockKernelBO() releases it.  That is how write
 * exclusion against both readers and other writers is achieved.
 */
void driWriteLockKernelBO(void)
{
   pipe_mutex_lock(bmMutex);
   while(kernelReaders != 0)
      pipe_condvar_wait(bmCond, bmMutex);
}
248
/* Drop the write-mode kernel-BO lock taken by driWriteLockKernelBO()
 * (releases the bmMutex that the lock function left held). */
void driWriteUnlockKernelBO(void)
{
   pipe_mutex_unlock(bmMutex);
}
253
/* Take the kernel-BO lock in read mode: bump the reader count under
 * bmMutex.  Multiple readers may hold the lock concurrently; a writer
 * blocks until the count returns to zero. */
void driReadLockKernelBO(void)
{
   pipe_mutex_lock(bmMutex);
   kernelReaders++;
   pipe_mutex_unlock(bmMutex);
}
260
/* Drop a read-mode hold on the kernel-BO lock; the last reader wakes
 * any writer blocked in driWriteLockKernelBO(). */
void driReadUnlockKernelBO(void)
{
   pipe_mutex_lock(bmMutex);
   if (--kernelReaders == 0)
      pipe_condvar_broadcast(bmCond);
   pipe_mutex_unlock(bmMutex);
}
268
269
270
271
272 /*
273 * TODO: Introduce fence pools in the same way as
274 * buffer object pools.
275 */
276
/*
 * A reference-counted buffer object.  The underlying storage lives in
 * ::private and is managed through ::pool's function table; all mutable
 * fields are protected by ::mutex.
 */
typedef struct _DriBufferObject
{
   DriBufferPool *pool;         /* pool whose vtable manages ::private */
   pipe_mutex mutex;            /* protects the fields below */
   int refCount;                /* released when this drops to zero */
   const char *name;            /* debug name supplied at creation */
   uint64_t flags;              /* current DRM_BO_FLAG_* flags */
   unsigned hint;               /* creation hint bits */
   unsigned alignment;          /* requested alignment for (re)allocation */
   unsigned createdByReference; /* nonzero if wrapped via driBOSetReferenced() */
   void *private;               /* pool-specific buffer handle */
   /* user-space buffer: */
   unsigned userBuffer;         /* nonzero: plain user memory, no pool storage */
   void *userData;              /* user-memory pointer (valid when userBuffer) */
   unsigned userSize;           /* user-memory size in bytes (when userBuffer) */
} DriBufferObject;

/*
 * Per-context validation state: a kernel-buffer list and a parallel list
 * of user-space buffer objects (stored cast to drmBO *).
 */
typedef struct _DriBufferList {
   drmBOList drmBuffers;  /* List of kernel buffers needing validation */
   drmBOList driBuffers;  /* List of user-space buffers needing validation */
} DriBufferList;
298
299
/*
 * Report a fatal buffer-manager error and abort.
 *
 * \param val       negative errno-style code (strerror(-val) is printed)
 * \param file      source file where the error was detected
 * \param function  function where the error was detected
 * \param line      source line where the error was detected
 *
 * Diagnostics now go to stderr (previously stdout, where they could be
 * lost or interleaved with application output).  The original
 * "#ifndef NDEBUG" conditional was removed: both branches called
 * abort(), so the behavior is identical in debug and release builds.
 */
void
bmError(int val, const char *file, const char *function, int line)
{
   fprintf(stderr,
           "Fatal video memory manager error \"%s\".\n"
           "Check kernel logs or set the LIBGL_DEBUG\n"
           "environment variable to \"verbose\" for more info.\n"
           "Detected in file %s, line %d, function %s.\n",
           strerror(-val), file, line, function);
   /* Unrecoverable: terminate regardless of build type. */
   abort();
}
314
315 extern drmBO *
316 driBOKernel(struct _DriBufferObject *buf)
317 {
318 drmBO *ret;
319
320 driReadLockKernelBO();
321 pipe_mutex_lock(buf->mutex);
322 assert(buf->private != NULL);
323 ret = buf->pool->kernel(buf->pool, buf->private);
324 if (!ret)
325 BM_CKFATAL(-EINVAL);
326 pipe_mutex_unlock(buf->mutex);
327 driReadUnlockKernelBO();
328
329 return ret;
330 }
331
/*
 * Block until the hardware is done with the buffer.  \a lazy selects a
 * less precise (cheaper) wait in the pool backend.
 */
void
driBOWaitIdle(struct _DriBufferObject *buf, int lazy)
{

   /*
    * This function may block. Is it sane to keep the mutex held during
    * that time??  (The pool's waitIdle receives &buf->mutex and may
    * drop/retake it internally.)
    */

   pipe_mutex_lock(buf->mutex);
   BM_CKFATAL(buf->pool->waitIdle(buf->pool, buf->private, &buf->mutex, lazy));
   pipe_mutex_unlock(buf->mutex);
}
345
346 void *
347 driBOMap(struct _DriBufferObject *buf, unsigned flags, unsigned hint)
348 {
349 void *virtual;
350 int retval;
351
352 if (buf->userBuffer) {
353 return buf->userData;
354 }
355
356 pipe_mutex_lock(buf->mutex);
357 assert(buf->private != NULL);
358 retval = buf->pool->map(buf->pool, buf->private, flags, hint,
359 &buf->mutex, &virtual);
360 pipe_mutex_unlock(buf->mutex);
361
362 return retval == 0 ? virtual : NULL;
363 }
364
/*
 * Unmap a buffer previously mapped with driBOMap().  No-op for
 * user-memory buffers, which are never really mapped.
 */
void
driBOUnmap(struct _DriBufferObject *buf)
{
   if (buf->userBuffer)
      return;

   assert(buf->private != NULL);
   pipe_mutex_lock(buf->mutex);
   BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private));
   pipe_mutex_unlock(buf->mutex);
}
376
377 unsigned long
378 driBOOffset(struct _DriBufferObject *buf)
379 {
380 unsigned long ret;
381
382 assert(buf->private != NULL);
383
384 pipe_mutex_lock(buf->mutex);
385 ret = buf->pool->offset(buf->pool, buf->private);
386 pipe_mutex_unlock(buf->mutex);
387 return ret;
388 }
389
390 unsigned long
391 driBOPoolOffset(struct _DriBufferObject *buf)
392 {
393 unsigned long ret;
394
395 assert(buf->private != NULL);
396
397 pipe_mutex_lock(buf->mutex);
398 ret = buf->pool->poolOffset(buf->pool, buf->private);
399 pipe_mutex_unlock(buf->mutex);
400 return ret;
401 }
402
403 uint64_t
404 driBOFlags(struct _DriBufferObject *buf)
405 {
406 uint64_t ret;
407
408 assert(buf->private != NULL);
409
410 driReadLockKernelBO();
411 pipe_mutex_lock(buf->mutex);
412 ret = buf->pool->flags(buf->pool, buf->private);
413 pipe_mutex_unlock(buf->mutex);
414 driReadUnlockKernelBO();
415 return ret;
416 }
417
/*
 * Take an additional reference on a buffer object and return it.
 * If the count was zero before the increment (result == 1), the caller
 * referenced a buffer that was already dead — treated as fatal.
 */
struct _DriBufferObject *
driBOReference(struct _DriBufferObject *buf)
{
   pipe_mutex_lock(buf->mutex);
   if (++buf->refCount == 1) {
      /* Unlock first: BM_CKFATAL aborts the process. */
      pipe_mutex_unlock(buf->mutex);
      BM_CKFATAL(-EINVAL);
   }
   pipe_mutex_unlock(buf->mutex);
   return buf;
}
429
/*
 * Drop a reference on a buffer object; destroys it (and its pool storage)
 * when the count reaches zero.  NULL is tolerated as a no-op.
 */
void
driBOUnReference(struct _DriBufferObject *buf)
{
   int tmp;

   if (!buf)
      return;

   pipe_mutex_lock(buf->mutex);
   tmp = --buf->refCount;
   if (!tmp) {
      /* Last reference: unlock before teardown — nobody else can reach
       * the buffer once the count hit zero. */
      pipe_mutex_unlock(buf->mutex);
      if (buf->private) {
         /* Shared (referenced) buffers are unreferenced, owned buffers
          * destroyed outright. */
         if (buf->createdByReference)
            buf->pool->unreference(buf->pool, buf->private);
         else
            buf->pool->destroy(buf->pool, buf->private);
      }
      /* Keep the live-buffer statistics in sync. */
      if (buf->userBuffer)
         num_user_buffers--;
      else
         num_buffers--;
      free(buf);
   } else
      pipe_mutex_unlock(buf->mutex);

}
457
458
/*
 * (Re)allocate a buffer's storage to hold \a size bytes and optionally
 * fill it with \a data.  \a newPool, if non-NULL, selects the pool for
 * any newly created storage; \a flags of 0 means "keep current flags".
 *
 * Three paths:
 *  1. new storage needed (no storage yet, pool change, or too small):
 *     destroy the old buffer and create a fresh one;
 *  2. storage exists but is busy (non-blocking map fails): replace it
 *     with a fresh buffer to avoid stalling, falling back to a blocking
 *     map of the old one if creation fails;
 *  3. storage exists and is idle: possibly adjust its flags via
 *     setStatus, then reuse it in place.
 *
 * Returns 0 on success or a negative errno value.
 */
int
driBOData(struct _DriBufferObject *buf,
          unsigned size, const void *data,
          DriBufferPool *newPool,
          uint64_t flags)
{
   void *virtual = NULL;
   int newBuffer;
   int retval = 0;
   struct _DriBufferPool *pool;

   assert(!buf->userBuffer); /* XXX just do a memcpy? */

   pipe_mutex_lock(buf->mutex);
   pool = buf->pool;

   /* Adopt newPool if the buffer has no pool yet; otherwise treat a
    * NULL newPool as "stay in the current pool". */
   if (pool == NULL && newPool != NULL) {
      buf->pool = newPool;
      pool = newPool;
   }
   if (newPool == NULL)
      newPool = pool;

   if (!pool->create) {
      assert((size_t)"driBOData called on invalid buffer\n" & 0);
      BM_CKFATAL(-EINVAL);
   }

   /* Fresh storage is required when there is none, the pool changes, or
    * the existing allocation is too small. */
   newBuffer = (!buf->private || pool != newPool ||
                pool->size(pool, buf->private) < size);

   if (!flags)
      flags = buf->flags;

   if (newBuffer) {

      /* Shared buffers cannot be resized: other clients hold handles to
       * the existing storage. */
      if (buf->createdByReference) {
         assert((size_t)"driBOData requiring resizing called on shared buffer.\n" & 0);
         BM_CKFATAL(-EINVAL);
      }

      if (buf->private)
         buf->pool->destroy(buf->pool, buf->private);

      pool = newPool;
      buf->pool = newPool;
      buf->private = pool->create(pool, size, flags, DRM_BO_HINT_DONT_FENCE,
                                  buf->alignment);
      if (!buf->private)
         retval = -ENOMEM;

      if (retval == 0)
         retval = pool->map(pool, buf->private,
                            DRM_BO_FLAG_WRITE,
                            DRM_BO_HINT_DONT_BLOCK, &buf->mutex, &virtual);
   } else if (pool->map(pool, buf->private, DRM_BO_FLAG_WRITE,
                        DRM_BO_HINT_DONT_BLOCK, &buf->mutex, &virtual)) {
      /*
       * Buffer is busy. need to create a new one.
       */

      void *newBuf;

      newBuf = pool->create(pool, size, flags, DRM_BO_HINT_DONT_FENCE,
                            buf->alignment);
      if (newBuf) {
         buf->pool->destroy(buf->pool, buf->private);
         buf->private = newBuf;
      }

      /* Blocking map: either of the fresh buffer, or — if creation
       * failed — of the old busy one (stalls instead of failing). */
      retval = pool->map(pool, buf->private,
                         DRM_BO_FLAG_WRITE, 0, &buf->mutex, &virtual);
   } else {
      uint64_t flag_diff = flags ^ buf->flags;

      /*
       * We might need to change buffer flags.
       */

      if (flag_diff){
         assert(pool->setStatus != NULL);
         BM_CKFATAL(pool->unmap(pool, buf->private));
         BM_CKFATAL(pool->setStatus(pool, buf->private, flag_diff,
                                    buf->flags));
         if (!data)
            goto out;

         /* Re-map after the flag change so the copy below can proceed. */
         retval = pool->map(pool, buf->private,
                            DRM_BO_FLAG_WRITE, 0, &buf->mutex, &virtual);
      }
   }

   if (retval == 0) {
      if (data)
         memcpy(virtual, data, size);

      BM_CKFATAL(pool->unmap(pool, buf->private));
   }

 out:
   pipe_mutex_unlock(buf->mutex);

   return retval;
}
563
564 void
565 driBOSubData(struct _DriBufferObject *buf,
566 unsigned long offset, unsigned long size, const void *data)
567 {
568 void *virtual;
569
570 assert(!buf->userBuffer); /* XXX just do a memcpy? */
571
572 pipe_mutex_lock(buf->mutex);
573 if (size && data) {
574 BM_CKFATAL(buf->pool->map(buf->pool, buf->private,
575 DRM_BO_FLAG_WRITE, 0, &buf->mutex,
576 &virtual));
577 memcpy((unsigned char *) virtual + offset, data, size);
578 BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private));
579 }
580 pipe_mutex_unlock(buf->mutex);
581 }
582
583 void
584 driBOGetSubData(struct _DriBufferObject *buf,
585 unsigned long offset, unsigned long size, void *data)
586 {
587 void *virtual;
588
589 assert(!buf->userBuffer); /* XXX just do a memcpy? */
590
591 pipe_mutex_lock(buf->mutex);
592 if (size && data) {
593 BM_CKFATAL(buf->pool->map(buf->pool, buf->private,
594 DRM_BO_FLAG_READ, 0, &buf->mutex, &virtual));
595 memcpy(data, (unsigned char *) virtual + offset, size);
596 BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private));
597 }
598 pipe_mutex_unlock(buf->mutex);
599 }
600
/*
 * Attach an existing (shared) kernel buffer, identified by \a handle, to
 * an empty buffer object.  The buffer must not already have storage and
 * its pool must support referencing.  Marks the buffer
 * createdByReference so teardown unreferences rather than destroys.
 */
void
driBOSetReferenced(struct _DriBufferObject *buf,
                   unsigned long handle)
{
   pipe_mutex_lock(buf->mutex);
   if (buf->private != NULL) {
      assert((size_t)"Invalid buffer for setReferenced\n" & 0);
      BM_CKFATAL(-EINVAL);

   }
   if (buf->pool->reference == NULL) {
      assert((size_t)"Invalid buffer pool for setReferenced\n" & 0);
      BM_CKFATAL(-EINVAL);
   }
   buf->private = buf->pool->reference(buf->pool, handle);
   if (!buf->private) {
      assert((size_t)"Invalid buffer pool for setStatic\n" & 0);
      BM_CKFATAL(-ENOMEM);
   }
   buf->createdByReference = TRUE;
   /* Inherit the flags of the underlying kernel object. */
   buf->flags = buf->pool->kernel(buf->pool, buf->private)->flags;
   pipe_mutex_unlock(buf->mutex);
}
624
625 int
626 driGenBuffers(struct _DriBufferPool *pool,
627 const char *name,
628 unsigned n,
629 struct _DriBufferObject *buffers[],
630 unsigned alignment, uint64_t flags, unsigned hint)
631 {
632 struct _DriBufferObject *buf;
633 int i;
634
635 flags = (flags) ? flags : DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM |
636 DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE;
637
638 ++num_buffers;
639
640 assert(pool);
641
642 for (i = 0; i < n; ++i) {
643 buf = (struct _DriBufferObject *) calloc(1, sizeof(*buf));
644 if (!buf)
645 return -ENOMEM;
646
647 pipe_mutex_init(buf->mutex);
648 pipe_mutex_lock(buf->mutex);
649 buf->refCount = 1;
650 buf->flags = flags;
651 buf->hint = hint;
652 buf->name = name;
653 buf->alignment = alignment;
654 buf->pool = pool;
655 buf->createdByReference = 0;
656 pipe_mutex_unlock(buf->mutex);
657 buffers[i] = buf;
658 }
659 return 0;
660 }
661
662 void
663 driGenUserBuffer(struct _DriBufferPool *pool,
664 const char *name,
665 struct _DriBufferObject **buffers,
666 void *ptr, unsigned bytes)
667 {
668 const unsigned alignment = 1, flags = 0, hint = 0;
669
670 --num_buffers; /* JB: is inced in GenBuffes */
671 driGenBuffers(pool, name, 1, buffers, alignment, flags, hint);
672 ++num_user_buffers;
673
674 (*buffers)->userBuffer = 1;
675 (*buffers)->userData = ptr;
676 (*buffers)->userSize = bytes;
677 }
678
/* Drop one reference on each of the \a n buffers in \a buffers. */
void
driDeleteBuffers(unsigned n, struct _DriBufferObject *buffers[])
{
   unsigned i;

   for (i = 0; i < n; ++i)
      driBOUnReference(buffers[i]);
}
688
689
/* Intentionally empty: this buffer manager needs no per-fd global
 * initialization.  Kept for interface compatibility with callers. */
void
driInitBufMgr(int fd)
{
   ;
}
695
696 /*
697 * Note that lists are per-context and don't need mutex protection.
698 */
699
700 struct _DriBufferList *
701 driBOCreateList(int target)
702 {
703 struct _DriBufferList *list = calloc(sizeof(*list), 1);
704
705 BM_CKFATAL(drmBOCreateList(target, &list->drmBuffers));
706 BM_CKFATAL(drmBOCreateList(target, &list->driBuffers));
707 return list;
708 }
709
710 int
711 driBOResetList(struct _DriBufferList * list)
712 {
713 int ret;
714 ret = drmBOResetList(&list->drmBuffers);
715 if (ret)
716 return ret;
717 ret = drmBOResetList(&list->driBuffers);
718 return ret;
719 }
720
721 void
722 driBOFreeList(struct _DriBufferList * list)
723 {
724 drmBOFreeList(&list->drmBuffers);
725 drmBOFreeList(&list->driBuffers);
726 free(list);
727 }
728
729
730 /*
731 * Copied from libdrm, because it is needed by driAddValidateItem.
732 */
733
/*
 * Append a buffer node to a drmBOList, recycling from the free pool when
 * possible.  Copied from libdrm (needed by driAddValidateItem); unlike
 * drmAddListItem above it zeroes node->bo_arg and inserts at the TAIL,
 * preserving insertion order so list position can serve as an index.
 * Returns the node or NULL on allocation failure.
 */
static drmBONode *
driAddListItem(drmBOList * list, drmBO * item,
               uint64_t arg0, uint64_t arg1)
{
   drmBONode *node;
   drmMMListHead *l;

   l = list->free.next;
   if (l == &list->free) {
      /* Free pool exhausted: allocate a fresh node. */
      node = (drmBONode *) malloc(sizeof(*node));
      if (!node) {
         return NULL;
      }
      list->numCurrent++;
   } else {
      /* Recycle from the free pool. */
      DRMLISTDEL(l);
      node = DRMLISTENTRY(drmBONode, l, head);
   }
   /* Recycled nodes may carry stale kernel-argument state. */
   memset(&node->bo_arg, 0, sizeof(node->bo_arg));
   node->buf = item;
   node->arg0 = arg0;
   node->arg1 = arg1;
   DRMLISTADDTAIL(&node->head, &list->list);
   list->numOnList++;
   return node;
}
760
761 /*
762 * Slightly modified version compared to the libdrm version.
763 * This one returns the list index of the buffer put on the list.
764 */
765
/*
 * Add a buffer to a validation list or merge flags into its existing
 * entry (same merge rules as drmAddValidateItem above).  Slightly
 * modified from libdrm: also returns the buffer's list index in
 * *itemLoc and the node itself in *pnode.
 * Returns 0, -ENOMEM, or -EINVAL on conflicting flag requests.
 */
static int
driAddValidateItem(drmBOList * list, drmBO * buf, uint64_t flags,
                   uint64_t mask, int *itemLoc,
                   struct _drmBONode **pnode)
{
   drmBONode *node, *cur;
   drmMMListHead *l;
   int count = 0;

   cur = NULL;

   /* Scan for an existing entry, counting position as we go. */
   for (l = list->list.next; l != &list->list; l = l->next) {
      node = DRMLISTENTRY(drmBONode, l, head);
      if (node->buf == buf) {
         cur = node;
         break;
      }
      count++;
   }
   if (!cur) {
      /* New entry: tail insertion keeps `count` valid as its index. */
      cur = driAddListItem(list, buf, flags, mask);
      if (!cur)
         return -ENOMEM;

      cur->arg0 = flags;
      cur->arg1 = mask;
   } else {
      /* Memory-placement flags are intersected, access flags unioned. */
      uint64_t memFlags = cur->arg0 & flags & DRM_BO_MASK_MEM;
      uint64_t accFlags = (cur->arg0 | flags) & ~DRM_BO_MASK_MEM;

      /* Conflicting non-memory bit requested by both callers. */
      if (mask & cur->arg1 & ~DRM_BO_MASK_MEM & (cur->arg0 ^ flags)) {
         return -EINVAL;
      }

      cur->arg1 |= mask;
      cur->arg0 = (cur->arg0 & ~mask) | ((memFlags | accFlags) & mask);

      /* Empty placement intersection with a memory mask: unsatisfiable. */
      if (((cur->arg1 & DRM_BO_MASK_MEM) != 0) &&
          (cur->arg0 & DRM_BO_MASK_MEM) == 0) {
         return -EINVAL;
      }
   }
   *itemLoc = count;
   *pnode = cur;
   return 0;
}
812
813
/*
 * Put a buffer on both validation lists: its kernel drmBO on the
 * drmBuffers list (returning its index and node) and the DriBufferObject
 * itself on the driBuffers list.  The first time a buffer lands on the
 * user list it gains a reference, dropped later by driBOUnrefUserList()
 * or driBOFenceUserList().
 */
void
driBOAddListItem(struct _DriBufferList * list, struct _DriBufferObject *buf,
                 uint64_t flags, uint64_t mask, int *itemLoc,
                 struct _drmBONode **node)
{
   int newItem;

   pipe_mutex_lock(buf->mutex);
   BM_CKFATAL(driAddValidateItem(&list->drmBuffers,
                                 buf->pool->kernel(buf->pool, buf->private),
                                 flags, mask, itemLoc, node));
   /* The user list stores the DriBufferObject pointer cast to drmBO *. */
   BM_CKFATAL(drmAddValidateItem(&list->driBuffers, (drmBO *) buf,
                                 flags, mask, &newItem));
   if (newItem)
      buf->refCount++;

   pipe_mutex_unlock(buf->mutex);
}
832
/* Hand out the kernel-buffer validation list.  Takes the kernel-BO
 * write lock, which stays held until driPutdrmBOList() is called. */
drmBOList *driGetdrmBOList(struct _DriBufferList *list)
{
   driWriteLockKernelBO();
   return &list->drmBuffers;
}
838
/* Release the kernel-BO write lock taken by driGetdrmBOList(). */
void driPutdrmBOList(struct _DriBufferList *list)
{
   driWriteUnlockKernelBO();
}
843
844
845 void
846 driBOFence(struct _DriBufferObject *buf, struct _DriFenceObject *fence)
847 {
848 pipe_mutex_lock(buf->mutex);
849 if (buf->pool->fence)
850 BM_CKFATAL(buf->pool->fence(buf->pool, buf->private, fence));
851 pipe_mutex_unlock(buf->mutex);
852
853 }
854
855 void
856 driBOUnrefUserList(struct _DriBufferList *list)
857 {
858 struct _DriBufferObject *buf;
859 void *curBuf;
860
861 curBuf = drmBOListIterator(&list->driBuffers);
862 while (curBuf) {
863 buf = (struct _DriBufferObject *)drmBOListBuf(curBuf);
864 driBOUnReference(buf);
865 curBuf = drmBOListNext(&list->driBuffers, curBuf);
866 }
867 }
868
/*
 * Create a fence object wrapping \a kFence, attach it to every buffer
 * on the user-space validation list, drop the per-list buffer
 * references, and reset the list.  Returns the new fence.
 */
struct _DriFenceObject *
driBOFenceUserList(struct _DriFenceMgr *mgr,
                   struct _DriBufferList *list, const char *name,
                   drmFence *kFence)
{
   struct _DriFenceObject *fence;
   struct _DriBufferObject *buf;
   void *curBuf;

   fence = driFenceCreate(mgr, kFence->fence_class, kFence->type,
                          kFence, sizeof(*kFence));
   curBuf = drmBOListIterator(&list->driBuffers);

   /*
    * User-space fencing callbacks.
    */

   while (curBuf) {
      buf = (struct _DriBufferObject *) drmBOListBuf(curBuf);
      driBOFence(buf, fence);
      /* Drop the reference taken when the buffer was listed. */
      driBOUnReference(buf);
      curBuf = drmBOListNext(&list->driBuffers, curBuf);
   }

   driBOResetList(list);
   return fence;
}
896
897 void
898 driBOValidateUserList(struct _DriBufferList * list)
899 {
900 void *curBuf;
901 struct _DriBufferObject *buf;
902
903 curBuf = drmBOListIterator(&list->driBuffers);
904
905 /*
906 * User-space validation callbacks.
907 */
908
909 while (curBuf) {
910 buf = (struct _DriBufferObject *) drmBOListBuf(curBuf);
911 pipe_mutex_lock(buf->mutex);
912 if (buf->pool->validate)
913 BM_CKFATAL(buf->pool->validate(buf->pool, buf->private, &buf->mutex));
914 pipe_mutex_unlock(buf->mutex);
915 curBuf = drmBOListNext(&list->driBuffers, curBuf);
916 }
917 }
918
919
/* Shut down a buffer pool via its takeDown hook.  The pool frees its
 * own resources; the pointer must not be used afterwards. */
void
driPoolTakeDown(struct _DriBufferPool *pool)
{
   pool->takeDown(pool);

}
926
927 unsigned long
928 driBOSize(struct _DriBufferObject *buf)
929 {
930 unsigned long size;
931
932 pipe_mutex_lock(buf->mutex);
933 size = buf->pool->size(buf->pool, buf->private);
934 pipe_mutex_unlock(buf->mutex);
935
936 return size;
937
938 }
939
/* Accessor for the kernel-buffer sub-list.  Unlike driGetdrmBOList()
 * this takes no lock — caller is responsible for synchronization. */
drmBOList *driBOGetDRMBuffers(struct _DriBufferList *list)
{
   return &list->drmBuffers;
}
944
/* Accessor for the user-space-buffer sub-list; no locking performed. */
drmBOList *driBOGetDRIBuffers(struct _DriBufferList *list)
{
   return &list->driBuffers;
}
949