1 /**************************************************************************
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
29 **************************************************************************/
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 * Eric Anholt <eric@anholt.net>
34 * Dave Airlie <airlied@linux.ie>
43 #include "dri_bufmgr.h"
49 #include "intel_bufmgr_ttm.h"
51 #define BUFMGR_DEBUG 0
53 #define MAX_RELOCS_PER_LIST 511
54 struct intel_reloc_info
58 GLuint delta
; /* not needed? */
67 struct drm_i915_op_arg bo_arg
;
70 void (*destroy
)(void *);
74 struct intel_bo_reloc_list
81 struct intel_bo_reloc_node
85 uint32_t nr_reloc_types
;
86 struct intel_bo_reloc_list type_list
;
89 struct intel_bo_list
{
95 void (*destroy
)(void *node
);
98 typedef struct _dri_bufmgr_ttm
{
102 _glthread_Mutex mutex
;
103 unsigned int fence_type
;
104 unsigned int fence_type_flush
;
106 /** ttm relocation list */
107 struct intel_bo_list list
;
108 struct intel_bo_list reloc_list
;
112 typedef struct _dri_bo_ttm
{
115 int refcount
; /* Protected by bufmgr->mutex */
119 * Note whether we are the owner of the buffer, to determine if we must
120 * drmBODestroy or drmBOUnreference to unreference the buffer.
125 typedef struct _dri_fence_ttm
129 int refcount
; /* Protected by bufmgr->mutex */
135 static int intel_adjust_list_nodes(struct intel_bo_list
*list
)
137 struct intel_bo_node
*node
;
141 while(list
->numCurrent
< list
->numTarget
) {
142 node
= (struct intel_bo_node
*) drmMalloc(sizeof(*node
));
148 DRMLISTADD(&node
->head
, &list
->free
);
151 while(list
->numCurrent
> list
->numTarget
) {
153 if (l
== &list
->free
)
156 node
= DRMLISTENTRY(struct intel_bo_node
, l
, head
);
164 void intel_bo_free_list(struct intel_bo_list
*list
)
166 struct intel_bo_node
*node
;
170 while(l
!= &list
->list
) {
172 node
= DRMLISTENTRY(struct intel_bo_node
, l
, head
);
180 while(l
!= &list
->free
) {
182 node
= DRMLISTENTRY(struct intel_bo_node
, l
, head
);
190 static int intel_bo_reset_list(struct intel_bo_list
*list
)
195 ret
= intel_adjust_list_nodes(list
);
200 while (l
!= &list
->list
) {
202 DRMLISTADD(l
, &list
->free
);
206 return intel_adjust_list_nodes(list
);
/** Default destroy callback for validate-list nodes: just free the payload. */
static void generic_destroy(void *nodep)
{
    free(nodep);
}
214 static int intel_create_bo_list(int numTarget
, struct intel_bo_list
*list
, void (*destroy
)(void *))
216 DRMINITLISTHEAD(&list
->list
);
217 DRMINITLISTHEAD(&list
->free
);
218 list
->numTarget
= numTarget
;
219 list
->numCurrent
= 0;
222 list
->destroy
= destroy
;
224 list
->destroy
= generic_destroy
;
225 return intel_adjust_list_nodes(list
);
229 static struct drm_i915_op_arg
*
230 intel_setup_validate_list(int fd
, struct intel_bo_list
*list
, struct intel_bo_list
*reloc_list
)
232 struct intel_bo_node
*node
;
233 struct intel_bo_reloc_node
*rl_node
;
234 drmMMListHead
*l
, *rl
;
235 struct drm_i915_op_arg
*arg
, *first
;
236 struct drm_bo_op_req
*req
;
237 uint64_t *prevNext
= NULL
;
241 for (l
= list
->list
.next
; l
!= &list
->list
; l
= l
->next
) {
242 node
= DRMLISTENTRY(struct intel_bo_node
, l
, head
);
251 *prevNext
= (unsigned long) arg
;
253 memset(arg
, 0, sizeof(*arg
));
254 prevNext
= &arg
->next
;
255 req
->bo_req
.handle
= node
->buf
->handle
;
256 req
->op
= drm_bo_validate
;
257 req
->bo_req
.flags
= node
->arg0
;
258 req
->bo_req
.hint
= 0;
259 req
->bo_req
.mask
= node
->arg1
;
260 req
->bo_req
.fence_class
= 0; /* Backwards compat. */
261 arg
->reloc_handle
= 0;
263 for (rl
= reloc_list
->list
.next
; rl
!= &reloc_list
->list
; rl
= rl
->next
) {
264 rl_node
= DRMLISTENTRY(struct intel_bo_reloc_node
, rl
, head
);
266 if (rl_node
->handle
== node
->buf
->handle
) {
267 arg
->reloc_handle
= rl_node
->type_list
.buf
.handle
;
278 static void intel_free_validate_list(int fd
, struct intel_bo_list
*list
)
280 struct intel_bo_node
*node
;
283 for (l
= list
->list
.next
; l
!= &list
->list
; l
= l
->next
) {
284 node
= DRMLISTENTRY(struct intel_bo_node
, l
, head
);
287 (*node
->destroy
)(node
->priv
);
292 static void intel_free_reloc_list(int fd
, struct intel_bo_list
*reloc_list
)
294 struct intel_bo_reloc_node
*reloc_node
;
295 drmMMListHead
*rl
, *tmp
;
297 for (rl
= reloc_list
->list
.next
, tmp
= rl
->next
; rl
!= &reloc_list
->list
; rl
= tmp
, tmp
= rl
->next
) {
298 reloc_node
= DRMLISTENTRY(struct intel_bo_reloc_node
, rl
, head
);
302 if (reloc_node
->nr_reloc_types
> 1) {
307 drmBOUnmap(fd
, &reloc_node
->type_list
.buf
);
308 drmBODestroy(fd
, &reloc_node
->type_list
.buf
);
313 static int intel_add_validate_buffer(struct intel_bo_list
*list
, dri_bo
*buf
, unsigned flags
,
314 unsigned mask
, int *itemLoc
, void (*destroy_cb
)(void *))
316 struct intel_bo_node
*node
, *cur
;
320 drmBO
*buf_bo
= &((dri_bo_ttm
*)buf
)->drm_bo
;
323 for (l
= list
->list
.next
; l
!= &list
->list
; l
= l
->next
) {
324 node
= DRMLISTENTRY(struct intel_bo_node
, l
, head
);
325 if (node
->buf
->handle
== buf_bo
->handle
) {
333 cur
= drmMalloc(sizeof(*cur
));
341 cur
->destroy
= destroy_cb
;
344 DRMLISTADDTAIL(&cur
->head
, &list
->list
);
347 unsigned memMask
= (cur
->arg1
| mask
) & DRM_BO_MASK_MEM
;
348 unsigned memFlags
= cur
->arg0
& flags
& memMask
;
353 if (mask
& cur
->arg1
& ~DRM_BO_MASK_MEM
& (cur
->arg0
^ flags
)) {
357 cur
->arg0
= memFlags
| ((cur
->arg0
| flags
) &
358 cur
->arg1
& ~DRM_BO_MASK_MEM
);
/* Reloc buffer layout: RELOC0_HEADER leading uint32s (count and type packed
 * in [0], next-buffer handle in [1]) followed by up to MAX_RELOCS_PER_LIST
 * entries of RELOC0_STRIDE uint32s each. */
#define RELOC0_STRIDE 4
#define RELOC0_HEADER 4
#define RELOC_BUF_SIZE ((RELOC0_HEADER + MAX_RELOCS_PER_LIST * RELOC0_STRIDE) * sizeof(uint32_t))
368 static int intel_create_new_reloc_type_list(int fd
, struct intel_bo_reloc_list
*cur_type
)
372 /* should allocate a drmBO here */
373 ret
= drmBOCreate(fd
, 0, RELOC_BUF_SIZE
, 0,
374 NULL
, drm_bo_type_dc
,
375 DRM_BO_FLAG_MEM_LOCAL
| DRM_BO_FLAG_READ
| DRM_BO_FLAG_WRITE
| DRM_BO_FLAG_MAPPABLE
| DRM_BO_FLAG_CACHED
,
380 ret
= drmBOMap(fd
, &cur_type
->buf
, DRM_BO_FLAG_READ
| DRM_BO_FLAG_WRITE
, 0, (void **)&cur_type
->relocs
);
387 static int intel_add_validate_reloc(int fd
, struct intel_bo_list
*reloc_list
, struct intel_reloc_info
*reloc_info
)
389 struct intel_bo_reloc_node
*rl_node
, *cur
;
390 drmMMListHead
*rl
, *l
;
392 uint32_t *reloc_start
;
394 struct intel_bo_reloc_list
*cur_type
;
398 for (rl
= reloc_list
->list
.next
; rl
!= &reloc_list
->list
; rl
= rl
->next
) {
399 rl_node
= DRMLISTENTRY(struct intel_bo_reloc_node
, rl
, head
);
400 if (rl_node
->handle
== reloc_info
->handle
) {
408 cur
= malloc(sizeof(*cur
));
412 cur
->nr_reloc_types
= 1;
413 cur
->handle
= reloc_info
->handle
;
414 cur_type
= &cur
->type_list
;
416 DRMINITLISTHEAD(&cur
->type_list
.head
);
417 ret
= intel_create_new_reloc_type_list(fd
, cur_type
);
421 DRMLISTADDTAIL(&cur
->head
, &reloc_list
->list
);
423 cur_type
->relocs
[0] = 0 | (reloc_info
->type
<< 16);
424 cur_type
->relocs
[1] = 0; // next reloc buffer handle is 0
428 if ((cur
->type_list
.relocs
[0] >> 16) == reloc_info
->type
) {
429 cur_type
= &cur
->type_list
;
432 for (l
= cur
->type_list
.head
.next
; l
!= &cur
->type_list
.head
; l
= l
->next
) {
433 cur_type
= DRMLISTENTRY(struct intel_bo_reloc_list
, l
, head
);
434 if (((cur_type
->relocs
[0] >> 16) & 0xffff) == reloc_info
->type
)
440 /* didn't find the relocation type */
442 cur_type
= malloc(sizeof(*cur_type
));
447 ret
= intel_create_new_reloc_type_list(fd
, cur_type
);
448 DRMLISTADDTAIL(&cur_type
->head
, &cur
->type_list
.head
);
450 cur_type
->relocs
[0] = (reloc_info
->type
<< 16);
451 cur_type
->relocs
[1] = 0;
453 // cur->relocs[cur->nr_reloc_lists-1][1] = 0;// TODO ADD HANDLE HERE
455 cur
->nr_reloc_types
++;
459 reloc_start
= cur_type
->relocs
;
461 num_relocs
= (reloc_start
[0] & 0xffff);
463 reloc_start
[num_relocs
*RELOC0_STRIDE
+ RELOC0_HEADER
] = reloc_info
->reloc
;
464 reloc_start
[num_relocs
*RELOC0_STRIDE
+ RELOC0_HEADER
+1] = reloc_info
->delta
;
465 reloc_start
[num_relocs
*RELOC0_STRIDE
+ RELOC0_HEADER
+2] = reloc_info
->index
;
467 if (((reloc_start
[0] & 0xffff)) > (MAX_RELOCS_PER_LIST
)) {
476 driFenceSignaled(DriFenceObject
* fence
, unsigned type
)
484 _glthread_LOCK_MUTEX(fence
->mutex
);
485 ret
= drmFenceSignaled(bufmgr_ttm
->fd
, &fence
->fence
, type
, &signaled
);
486 _glthread_UNLOCK_MUTEX(fence
->mutex
);
493 dri_ttm_alloc(dri_bufmgr
*bufmgr
, const char *name
,
494 unsigned long size
, unsigned int alignment
,
495 unsigned int location_mask
)
497 dri_bufmgr_ttm
*ttm_bufmgr
;
499 unsigned int pageSize
= getpagesize();
501 unsigned int flags
, hint
;
503 ttm_bufmgr
= (dri_bufmgr_ttm
*)bufmgr
;
505 ttm_buf
= malloc(sizeof(*ttm_buf
));
509 /* The mask argument doesn't do anything for us that we want other than
510 * determine which pool (TTM or local) the buffer is allocated into, so just
511 * pass all of the allocation class flags.
513 flags
= location_mask
| DRM_BO_FLAG_READ
| DRM_BO_FLAG_WRITE
|
515 /* No hints we want to use. */
518 ret
= drmBOCreate(ttm_bufmgr
->fd
, 0, size
, alignment
/ pageSize
,
519 NULL
, drm_bo_type_dc
,
520 flags
, hint
, &ttm_buf
->drm_bo
);
525 ttm_buf
->bo
.size
= ttm_buf
->drm_bo
.size
;
526 ttm_buf
->bo
.offset
= ttm_buf
->drm_bo
.offset
;
527 ttm_buf
->bo
.virtual = NULL
;
528 ttm_buf
->bo
.bufmgr
= bufmgr
;
529 ttm_buf
->name
= name
;
530 ttm_buf
->refcount
= 1;
531 ttm_buf
->owner
= GL_TRUE
;
534 fprintf(stderr
, "bo_create: %p (%s)\n", &ttm_buf
->bo
, ttm_buf
->name
);
540 /* Our TTM backend doesn't allow creation of static buffers, as that requires
541 * privelege for the non-fake case, and the lock in the fake case where we were
542 * working around the X Server not creating buffers and passing handles to us.
545 dri_ttm_alloc_static(dri_bufmgr
*bufmgr
, const char *name
,
546 unsigned long offset
, unsigned long size
, void *virtual,
547 unsigned int location_mask
)
552 /** Returns a dri_bo wrapping the given buffer object handle.
554 * This can be used when one application needs to pass a buffer object
558 intel_ttm_bo_create_from_handle(dri_bufmgr
*bufmgr
, const char *name
,
561 dri_bufmgr_ttm
*ttm_bufmgr
;
565 ttm_bufmgr
= (dri_bufmgr_ttm
*)bufmgr
;
567 ttm_buf
= malloc(sizeof(*ttm_buf
));
571 ret
= drmBOReference(ttm_bufmgr
->fd
, handle
, &ttm_buf
->drm_bo
);
576 ttm_buf
->bo
.size
= ttm_buf
->drm_bo
.size
;
577 ttm_buf
->bo
.offset
= ttm_buf
->drm_bo
.offset
;
578 ttm_buf
->bo
.virtual = NULL
;
579 ttm_buf
->bo
.bufmgr
= bufmgr
;
580 ttm_buf
->name
= name
;
581 ttm_buf
->refcount
= 1;
582 ttm_buf
->owner
= GL_FALSE
;
585 fprintf(stderr
, "bo_create_from_handle: %p %08x (%s)\n", &ttm_buf
->bo
, handle
,
593 dri_ttm_bo_reference(dri_bo
*buf
)
595 dri_bufmgr_ttm
*bufmgr_ttm
= (dri_bufmgr_ttm
*)buf
->bufmgr
;
596 dri_bo_ttm
*ttm_buf
= (dri_bo_ttm
*)buf
;
598 _glthread_LOCK_MUTEX(bufmgr_ttm
->mutex
);
600 _glthread_UNLOCK_MUTEX(bufmgr_ttm
->mutex
);
604 dri_ttm_bo_unreference(dri_bo
*buf
)
606 dri_bufmgr_ttm
*bufmgr_ttm
= (dri_bufmgr_ttm
*)buf
->bufmgr
;
607 dri_bo_ttm
*ttm_buf
= (dri_bo_ttm
*)buf
;
612 _glthread_LOCK_MUTEX(bufmgr_ttm
->mutex
);
613 if (--ttm_buf
->refcount
== 0) {
616 /* XXX Having to use drmBODestroy as the opposite of drmBOCreate instead
617 * of simply unreferencing is madness, and leads to behaviors we may not
618 * want (making the buffer unsharable).
621 ret
= drmBODestroy(bufmgr_ttm
->fd
, &ttm_buf
->drm_bo
);
623 ret
= drmBOUnReference(bufmgr_ttm
->fd
, &ttm_buf
->drm_bo
);
625 fprintf(stderr
, "drmBOUnReference failed (%s): %s\n", ttm_buf
->name
,
629 fprintf(stderr
, "bo_unreference final: %p (%s)\n",
630 &ttm_buf
->bo
, ttm_buf
->name
);
632 _glthread_UNLOCK_MUTEX(bufmgr_ttm
->mutex
);
636 _glthread_UNLOCK_MUTEX(bufmgr_ttm
->mutex
);
640 dri_ttm_bo_map(dri_bo
*buf
, GLboolean write_enable
)
642 dri_bufmgr_ttm
*bufmgr_ttm
;
643 dri_bo_ttm
*ttm_buf
= (dri_bo_ttm
*)buf
;
646 bufmgr_ttm
= (dri_bufmgr_ttm
*)buf
->bufmgr
;
648 flags
= DRM_BO_FLAG_READ
;
650 flags
|= DRM_BO_FLAG_WRITE
;
652 assert(buf
->virtual == NULL
);
655 fprintf(stderr
, "bo_map: %p (%s)\n", &ttm_buf
->bo
, ttm_buf
->name
);
658 return drmBOMap(bufmgr_ttm
->fd
, &ttm_buf
->drm_bo
, flags
, 0, &buf
->virtual);
662 dri_ttm_bo_unmap(dri_bo
*buf
)
664 dri_bufmgr_ttm
*bufmgr_ttm
;
665 dri_bo_ttm
*ttm_buf
= (dri_bo_ttm
*)buf
;
670 bufmgr_ttm
= (dri_bufmgr_ttm
*)buf
->bufmgr
;
672 assert(buf
->virtual != NULL
);
677 fprintf(stderr
, "bo_unmap: %p (%s)\n", &ttm_buf
->bo
, ttm_buf
->name
);
680 return drmBOUnmap(bufmgr_ttm
->fd
, &ttm_buf
->drm_bo
);
684 dri_ttm_validate(dri_bo
*buf
, unsigned int flags
)
686 dri_bufmgr_ttm
*bufmgr_ttm
;
687 dri_bo_ttm
*ttm_buf
= (dri_bo_ttm
*)buf
;
691 /* XXX: Sanity-check whether we've already validated this one under
692 * different flags. See drmAddValidateItem().
695 bufmgr_ttm
= (dri_bufmgr_ttm
*)buf
->bufmgr
;
697 /* Calculate the appropriate mask to pass to the DRM. There appears to be
698 * be a direct relationship to flags, so it's unnecessary to have it passed
701 mask
= DRM_BO_MASK_MEM
;
702 mask
|= flags
& (DRM_BO_FLAG_READ
| DRM_BO_FLAG_WRITE
| DRM_BO_FLAG_EXE
);
704 err
= drmBOValidate(bufmgr_ttm
->fd
, &ttm_buf
->drm_bo
, 0, flags
, mask
, 0);
707 /* XXX: add to fence list for sanity checking */
709 fprintf(stderr
, "failed to validate buffer (%s): %s\n",
710 ttm_buf
->name
, strerror(-err
));
713 buf
->offset
= ttm_buf
->drm_bo
.offset
;
716 fprintf(stderr
, "bo_validate: %p (%s)\n", &ttm_buf
->bo
, ttm_buf
->name
);
722 /* Returns a dri_bo wrapping the given buffer object handle.
724 * This can be used when one application needs to pass a buffer object
728 intel_ttm_fence_create_from_arg(dri_bufmgr
*bufmgr
, const char *name
,
729 drm_fence_arg_t
*arg
)
731 dri_bufmgr_ttm
*ttm_bufmgr
;
732 dri_fence_ttm
*ttm_fence
;
734 ttm_bufmgr
= (dri_bufmgr_ttm
*)bufmgr
;
736 ttm_fence
= malloc(sizeof(*ttm_fence
));
740 ttm_fence
->drm_fence
.handle
= arg
->handle
;
741 ttm_fence
->drm_fence
.fence_class
= arg
->fence_class
;
742 ttm_fence
->drm_fence
.type
= arg
->type
;
743 ttm_fence
->drm_fence
.flags
= arg
->flags
;
744 ttm_fence
->drm_fence
.signaled
= 0;
745 ttm_fence
->drm_fence
.sequence
= arg
->sequence
;
747 ttm_fence
->fence
.bufmgr
= bufmgr
;
748 ttm_fence
->name
= name
;
749 ttm_fence
->refcount
= 1;
752 fprintf(stderr
, "fence_create_from_handle: %p (%s)\n", &ttm_fence
->fence
,
756 return &ttm_fence
->fence
;
760 dri_ttm_fence_validated(dri_bufmgr
*bufmgr
, const char *name
,
767 dri_ttm_fence_reference(dri_fence
*fence
)
769 dri_fence_ttm
*fence_ttm
= (dri_fence_ttm
*)fence
;
770 dri_bufmgr_ttm
*bufmgr_ttm
= (dri_bufmgr_ttm
*)fence
->bufmgr
;
772 _glthread_LOCK_MUTEX(bufmgr_ttm
->mutex
);
773 ++fence_ttm
->refcount
;
774 _glthread_UNLOCK_MUTEX(bufmgr_ttm
->mutex
);
776 fprintf(stderr
, "fence_reference: %p (%s)\n", &fence_ttm
->fence
,
782 dri_ttm_fence_unreference(dri_fence
*fence
)
784 dri_fence_ttm
*fence_ttm
= (dri_fence_ttm
*)fence
;
785 dri_bufmgr_ttm
*bufmgr_ttm
= (dri_bufmgr_ttm
*)fence
->bufmgr
;
791 fprintf(stderr
, "fence_unreference: %p (%s)\n", &fence_ttm
->fence
,
794 _glthread_LOCK_MUTEX(bufmgr_ttm
->mutex
);
795 if (--fence_ttm
->refcount
== 0) {
798 /* XXX Having to use drmFenceDestroy as the opposite of drmFenceBuffers
799 * instead of simply unreferencing is madness, and leads to behaviors we
800 * may not want (making the fence unsharable). This behavior by the DRM
801 * ioctls should be fixed, and drmFenceDestroy eliminated.
803 ret
= drmFenceDestroy(bufmgr_ttm
->fd
, &fence_ttm
->drm_fence
);
805 fprintf(stderr
, "drmFenceDestroy failed (%s): %s\n",
806 fence_ttm
->name
, strerror(-ret
));
809 _glthread_UNLOCK_MUTEX(bufmgr_ttm
->mutex
);
813 _glthread_UNLOCK_MUTEX(bufmgr_ttm
->mutex
);
817 dri_ttm_fence_wait(dri_fence
*fence
)
819 dri_fence_ttm
*fence_ttm
= (dri_fence_ttm
*)fence
;
820 dri_bufmgr_ttm
*bufmgr_ttm
= (dri_bufmgr_ttm
*)fence
->bufmgr
;
823 _glthread_LOCK_MUTEX(bufmgr_ttm
->mutex
);
824 ret
= drmFenceWait(bufmgr_ttm
->fd
, 0, &fence_ttm
->drm_fence
, 0);
825 _glthread_UNLOCK_MUTEX(bufmgr_ttm
->mutex
);
827 _mesa_printf("%s:%d: Error %d waiting for fence %s.\n",
828 __FILE__
, __LINE__
, ret
, fence_ttm
->name
);
833 fprintf(stderr
, "fence_wait: %p (%s)\n", &fence_ttm
->fence
,
839 dri_bufmgr_ttm_destroy(dri_bufmgr
*bufmgr
)
841 dri_bufmgr_ttm
*bufmgr_ttm
= (dri_bufmgr_ttm
*)bufmgr
;
843 _glthread_DESTROY_MUTEX(bufmgr_ttm
->mutex
);
848 static void intel_dribo_destroy_callback(void *priv
)
850 dri_bo
*dribo
= priv
;
853 dri_bo_unreference(dribo
);
858 dri_ttm_emit_reloc(dri_bo
*batch_buf
, GLuint flags
, GLuint delta
, GLuint offset
,
861 dri_bo_ttm
*ttm_buf
= (dri_bo_ttm
*)batch_buf
;
862 dri_bufmgr_ttm
*bufmgr_ttm
= (dri_bufmgr_ttm
*)batch_buf
->bufmgr
;
864 struct intel_reloc_info reloc
;
868 mask
= DRM_BO_MASK_MEM
;
869 mask
|= flags
& (DRM_BO_FLAG_READ
| DRM_BO_FLAG_WRITE
| DRM_BO_FLAG_EXE
);
871 ret
= intel_add_validate_buffer(&bufmgr_ttm
->list
, relocatee
, flags
, mask
, &newItem
, intel_dribo_destroy_callback
);
876 dri_bo_reference(relocatee
);
879 reloc
.type
= I915_RELOC_TYPE_0
;
880 reloc
.reloc
= offset
;
882 reloc
.index
= newItem
;
883 reloc
.handle
= ttm_buf
->drm_bo
.handle
;
885 intel_add_validate_reloc(bufmgr_ttm
->fd
, &bufmgr_ttm
->reloc_list
, &reloc
);
891 dri_ttm_process_reloc(dri_bo
*batch_buf
)
893 dri_bufmgr_ttm
*bufmgr_ttm
= (dri_bufmgr_ttm
*)batch_buf
->bufmgr
;
897 dri_bo_unmap(batch_buf
);
899 intel_add_validate_buffer(&bufmgr_ttm
->list
, batch_buf
, DRM_BO_FLAG_MEM_TT
| DRM_BO_FLAG_EXE
,
900 DRM_BO_MASK_MEM
| DRM_BO_FLAG_EXE
, &itemLoc
, NULL
);
902 ptr
= intel_setup_validate_list(bufmgr_ttm
->fd
, &bufmgr_ttm
->list
, &bufmgr_ttm
->reloc_list
);
908 dri_ttm_post_submit(dri_bo
*batch_buf
, dri_fence
**last_fence
)
910 dri_bufmgr_ttm
*bufmgr_ttm
= (dri_bufmgr_ttm
*)batch_buf
->bufmgr
;
912 intel_free_validate_list(bufmgr_ttm
->fd
, &bufmgr_ttm
->list
);
913 intel_free_reloc_list(bufmgr_ttm
->fd
, &bufmgr_ttm
->reloc_list
);
915 intel_bo_reset_list(&bufmgr_ttm
->list
);
919 * Initializes the TTM buffer manager, which uses the kernel to allocate, map,
920 * and manage map buffer objections.
922 * \param fd File descriptor of the opened DRM device.
923 * \param fence_type Driver-specific fence type used for fences with no flush.
924 * \param fence_type_flush Driver-specific fence type used for fences with a
928 intel_bufmgr_ttm_init(int fd
, unsigned int fence_type
,
929 unsigned int fence_type_flush
)
931 dri_bufmgr_ttm
*bufmgr_ttm
;
933 bufmgr_ttm
= malloc(sizeof(*bufmgr_ttm
));
935 bufmgr_ttm
->fence_type
= fence_type
;
936 bufmgr_ttm
->fence_type_flush
= fence_type_flush
;
937 _glthread_INIT_MUTEX(bufmgr_ttm
->mutex
);
939 intel_create_bo_list(10, &bufmgr_ttm
->list
, NULL
);
940 intel_create_bo_list(1, &bufmgr_ttm
->reloc_list
, NULL
);
942 bufmgr_ttm
->bufmgr
.bo_alloc
= dri_ttm_alloc
;
943 bufmgr_ttm
->bufmgr
.bo_alloc_static
= dri_ttm_alloc_static
;
944 bufmgr_ttm
->bufmgr
.bo_reference
= dri_ttm_bo_reference
;
945 bufmgr_ttm
->bufmgr
.bo_unreference
= dri_ttm_bo_unreference
;
946 bufmgr_ttm
->bufmgr
.bo_map
= dri_ttm_bo_map
;
947 bufmgr_ttm
->bufmgr
.bo_unmap
= dri_ttm_bo_unmap
;
948 bufmgr_ttm
->bufmgr
.bo_validate
= dri_ttm_validate
;
949 bufmgr_ttm
->bufmgr
.fence_validated
= dri_ttm_fence_validated
;
950 bufmgr_ttm
->bufmgr
.fence_reference
= dri_ttm_fence_reference
;
951 bufmgr_ttm
->bufmgr
.fence_unreference
= dri_ttm_fence_unreference
;
952 bufmgr_ttm
->bufmgr
.fence_wait
= dri_ttm_fence_wait
;
953 bufmgr_ttm
->bufmgr
.destroy
= dri_bufmgr_ttm_destroy
;
954 bufmgr_ttm
->bufmgr
.emit_reloc
= dri_ttm_emit_reloc
;
955 bufmgr_ttm
->bufmgr
.process_relocs
= dri_ttm_process_reloc
;
956 bufmgr_ttm
->bufmgr
.post_submit
= dri_ttm_post_submit
;
957 return &bufmgr_ttm
->bufmgr
;