1 /**************************************************************************
3 * Copyright © 2007 Intel Corporation
4 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 * The above copyright notice and this permission notice (including the
24 * next paragraph) shall be included in all copies or substantial portions
28 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 */
41 #include "dri_bufmgr.h"
45 #define BUFMGR_DEBUG 0
/* Per-device state for the TTM (kernel-managed) buffer manager.
 * NOTE(review): interior lines (original 48-50, 54) are missing from this
 * extraction — presumably the embedded dri_bufmgr vtable, the DRM fd, and
 * the closing "} dri_bufmgr_ttm;". Confirm against the full file.
 */
47 typedef struct _dri_bufmgr_ttm
{
/* Guards buffer/fence refcounts; locked throughout this file. */
51 _glthread_Mutex mutex
;
/* Driver-specific DRM fence class for ordinary fences. */
52 unsigned int fence_type
;
/* Fence class used when a flush is required (see dri_ttm_fence_validated). */
53 unsigned int fence_type_flush
;
/* Wraps a kernel drm buffer object behind the dri_bo interface.
 * NOTE(review): lines missing from this extraction (original 57-58, 60-68)
 * presumably hold the embedded dri_bo, the drmBO handle, the name, the
 * owner flag, and the closing "} dri_bo_ttm;". Confirm in the full file.
 */
56 typedef struct _dri_bo_ttm
{
59 int refcount
; /* Protected by bufmgr->mutex */
/**
 * Note whether we are the owner of the buffer, to determine if we must
 * drmBODestroy or drmBOUnreference to unreference the buffer.
 */
/* Wraps a kernel drm fence behind the dri_fence interface.
 * NOTE(review): members other than refcount (original lines 70-72, 74-77 —
 * presumably the embedded dri_fence, name, drmFence, and the closing
 * "} dri_fence_ttm;") are missing from this extraction.
 */
69 typedef struct _dri_fence_ttm
73 int refcount
; /* Protected by bufmgr->mutex */
/* Queries whether the given fence has signaled for `type`.
 * NOTE(review): the return type, braces, local declarations (ret, signaled)
 * and return statement are missing from this extraction. `bufmgr_ttm` is
 * used here with no visible declaration, which suggests this function may be
 * disabled (#if 0) or dead in the full file — confirm before relying on it.
 */
80 driFenceSignaled(DriFenceObject
* fence
, unsigned type
)
/* The fence's own mutex (not the bufmgr mutex) guards the query. */
88 _glthread_LOCK_MUTEX(fence
->mutex
);
89 ret
= drmFenceSignaled(bufmgr_ttm
->fd
, &fence
->fence
, type
, &signaled
);
90 _glthread_UNLOCK_MUTEX(fence
->mutex
);
/* Allocates a new buffer object of `size` bytes via drmBOCreate and wraps it
 * in a dri_bo. The allocation pool is selected by location_mask.
 * NOTE(review): this extraction is missing several original lines — the
 * ttm_buf/ret declarations, the malloc NULL check, "hint = 0;" (original
 * line ~120, implied by the "No hints" comment), the drmBOCreate error path,
 * and the final "return &ttm_buf->bo;". Confirm in the full file.
 */
97 dri_ttm_alloc(dri_bufmgr
*bufmgr
, const char *name
,
98 unsigned long size
, unsigned int alignment
,
99 unsigned int location_mask
)
101 dri_bufmgr_ttm
*ttm_bufmgr
;
/* drmBOCreate takes alignment in pages, so the byte alignment is divided
 * by the page size below. */
103 unsigned int pageSize
= getpagesize();
105 unsigned int flags
, hint
;
107 ttm_bufmgr
= (dri_bufmgr_ttm
*)bufmgr
;
109 ttm_buf
= malloc(sizeof(*ttm_buf
));
113 /* The mask argument doesn't do anything for us that we want other than
114 * determine which pool (TTM or local) the buffer is allocated into, so just
115 * pass all of the allocation class flags.
*/
117 flags
= location_mask
| DRM_BO_FLAG_READ
| DRM_BO_FLAG_WRITE
|
119 /* No hints we want to use. */
122 ret
= drmBOCreate(ttm_bufmgr
->fd
, 0, size
, alignment
/ pageSize
,
123 NULL
, drm_bo_type_dc
,
124 flags
, hint
, &ttm_buf
->drm_bo
);
/* Mirror the kernel's view of the buffer into the dri_bo wrapper. */
129 ttm_buf
->bo
.size
= ttm_buf
->drm_bo
.size
;
130 ttm_buf
->bo
.offset
= ttm_buf
->drm_bo
.offset
;
/* Not mapped until dri_ttm_bo_map() is called. */
131 ttm_buf
->bo
.virtual = NULL
;
132 ttm_buf
->bo
.bufmgr
= bufmgr
;
133 ttm_buf
->name
= name
;
134 ttm_buf
->refcount
= 1;
/* We created this buffer, so we must drmBODestroy it (see unreference). */
135 ttm_buf
->owner
= GL_TRUE
;
/* Debug trace; presumably guarded by #if BUFMGR_DEBUG in the full file. */
138 fprintf(stderr
, "bo_create: %p (%s)\n", &ttm_buf
->bo
, ttm_buf
->name
);
144 /* Our TTM backend doesn't allow creation of static buffers, as that requires
145 * privilege for the non-fake case, and the lock in the fake case where we were
146 * working around the X Server not creating buffers and passing handles to us.
*/
/* NOTE(review): the function body (original lines 152-154 — presumably just
 * a failure return, e.g. NULL) is missing from this extraction. */
149 dri_ttm_alloc_static(dri_bufmgr
*bufmgr
, const char *name
,
150 unsigned long offset
, unsigned long size
, void *virtual,
151 unsigned int location_mask
)
156 /** Returns a dri_bo wrapping the given buffer object handle.
158 * This can be used when one application needs to pass a buffer object
 * to another process (the handle parameter is not visible in this
 * extraction — original lines 163-164 are missing; confirm the full
 * signature, along with the ttm_buf/ret declarations, the malloc and
 * drmBOReference error paths, and the final return, in the full file).
*/
162 dri_ttm_bo_create_from_handle(dri_bufmgr
*bufmgr
, const char *name
,
165 dri_bufmgr_ttm
*ttm_bufmgr
;
169 ttm_bufmgr
= (dri_bufmgr_ttm
*)bufmgr
;
171 ttm_buf
= malloc(sizeof(*ttm_buf
));
/* Take a kernel-side reference on the existing buffer instead of creating
 * a new one. */
175 ret
= drmBOReference(ttm_bufmgr
->fd
, handle
, &ttm_buf
->drm_bo
);
180 ttm_buf
->bo
.size
= ttm_buf
->drm_bo
.size
;
181 ttm_buf
->bo
.offset
= ttm_buf
->drm_bo
.offset
;
182 ttm_buf
->bo
.virtual = NULL
;
183 ttm_buf
->bo
.bufmgr
= bufmgr
;
184 ttm_buf
->name
= name
;
185 ttm_buf
->refcount
= 1;
/* Not the owner: unreference with drmBOUnReference, never drmBODestroy. */
186 ttm_buf
->owner
= GL_FALSE
;
/* Debug trace; presumably #if BUFMGR_DEBUG guarded in the full file. */
189 fprintf(stderr
, "bo_create_from_handle: %p (%s)\n", &ttm_buf
->bo
,
/* Takes a reference on a buffer object, under the bufmgr mutex.
 * NOTE(review): the actual increment (original line 203 — presumably
 * "ttm_buf->refcount++;") is missing from this extraction; confirm. */
197 dri_ttm_bo_reference(dri_bo
*buf
)
199 dri_bufmgr_ttm
*bufmgr_ttm
= (dri_bufmgr_ttm
*)buf
->bufmgr
;
200 dri_bo_ttm
*ttm_buf
= (dri_bo_ttm
*)buf
;
202 _glthread_LOCK_MUTEX(bufmgr_ttm
->mutex
);
204 _glthread_UNLOCK_MUTEX(bufmgr_ttm
->mutex
);
/* Drops a reference on a buffer object; on the last reference, releases the
 * kernel buffer — drmBODestroy when we created it, drmBOUnReference when we
 * only referenced an existing handle.
 * NOTE(review): this extraction is missing the owner-flag conditional that
 * presumably selects between the drmBODestroy and drmBOUnReference calls
 * below (original lines ~223-228), the NULL-buf guard, and the free of the
 * wrapper — confirm in the full file. */
208 dri_ttm_bo_unreference(dri_bo
*buf
)
210 dri_bufmgr_ttm
*bufmgr_ttm
= (dri_bufmgr_ttm
*)buf
->bufmgr
;
211 dri_bo_ttm
*ttm_buf
= (dri_bo_ttm
*)buf
;
216 _glthread_LOCK_MUTEX(bufmgr_ttm
->mutex
);
217 if (--ttm_buf
->refcount
== 0) {
220 /* XXX Having to use drmBODestroy as the opposite of drmBOCreate instead
221 * of simply unreferencing is madness, and leads to behaviors we may not
222 * want (making the buffer unsharable).
*/
/* Owner path: we created the buffer, so destroy it. */
225 ret
= drmBODestroy(bufmgr_ttm
->fd
, &ttm_buf
->drm_bo
);
/* Non-owner path: just drop our kernel-side reference. */
227 ret
= drmBOUnReference(bufmgr_ttm
->fd
, &ttm_buf
->drm_bo
);
229 fprintf(stderr
, "drmBOUnReference failed (%s): %s\n", ttm_buf
->name
,
/* Debug trace of final unreference; presumably #if BUFMGR_DEBUG guarded. */
233 fprintf(stderr
, "bo_unreference final: %p (%s)\n",
234 &ttm_buf
->bo
, ttm_buf
->name
);
236 _glthread_UNLOCK_MUTEX(bufmgr_ttm
->mutex
);
/* Non-final-unreference path releases the mutex here. */
240 _glthread_UNLOCK_MUTEX(bufmgr_ttm
->mutex
);
/* Maps the buffer into the CPU's address space via drmBOMap, storing the
 * pointer in buf->virtual. Returns drmBOMap's result (0 on success).
 * NOTE(review): the conditional that makes the WRITE flag depend on
 * write_enable (original line ~253) is missing from this extraction, as are
 * the flags declaration and any NULL-buf guard — confirm in the full file. */
244 dri_ttm_bo_map(dri_bo
*buf
, GLboolean write_enable
)
246 dri_bufmgr_ttm
*bufmgr_ttm
;
247 dri_bo_ttm
*ttm_buf
= (dri_bo_ttm
*)buf
;
250 bufmgr_ttm
= (dri_bufmgr_ttm
*)buf
->bufmgr
;
/* Always request read access ... */
252 flags
= DRM_BO_FLAG_READ
;
/* ... and presumably add write access only when write_enable is set. */
254 flags
|= DRM_BO_FLAG_WRITE
;
/* Double-mapping is a caller bug. */
256 assert(buf
->virtual == NULL
);
/* Debug trace; presumably #if BUFMGR_DEBUG guarded in the full file. */
259 fprintf(stderr
, "bo_map: %p (%s)\n", &ttm_buf
->bo
, ttm_buf
->name
);
262 return drmBOMap(bufmgr_ttm
->fd
, &ttm_buf
->drm_bo
, flags
, 0, &buf
->virtual);
/* Unmaps a previously mapped buffer via drmBOUnmap; returns its result.
 * NOTE(review): missing from this extraction are original lines ~270-272
 * (presumably a NULL-buf early return) and ~278 (presumably
 * "buf->virtual = NULL;") — confirm in the full file. */
266 dri_ttm_bo_unmap(dri_bo
*buf
)
268 dri_bufmgr_ttm
*bufmgr_ttm
;
269 dri_bo_ttm
*ttm_buf
= (dri_bo_ttm
*)buf
;
274 bufmgr_ttm
= (dri_bufmgr_ttm
*)buf
->bufmgr
;
/* Unmapping an unmapped buffer is a caller bug. */
276 assert(buf
->virtual != NULL
);
/* Debug trace; presumably #if BUFMGR_DEBUG guarded in the full file. */
281 fprintf(stderr
, "bo_unmap: %p (%s)\n", &ttm_buf
->bo
, ttm_buf
->name
);
284 return drmBOUnmap(bufmgr_ttm
->fd
, &ttm_buf
->drm_bo
);
/* Validates (binds) the buffer for hardware use with the requested access
 * flags, then refreshes buf->offset from the kernel's placement.
 * NOTE(review): the err/mask declarations, the error-return branch around
 * the "failed to validate" message, and the final return are missing from
 * this extraction — confirm in the full file. */
288 dri_ttm_validate(dri_bo
*buf
, unsigned int flags
)
290 dri_bufmgr_ttm
*bufmgr_ttm
;
291 dri_bo_ttm
*ttm_buf
= (dri_bo_ttm
*)buf
;
295 /* XXX: Sanity-check whether we've already validated this one under
296 * different flags. See drmAddValidateItem().
*/
299 bufmgr_ttm
= (dri_bufmgr_ttm
*)buf
->bufmgr
;
301 /* Calculate the appropriate mask to pass to the DRM. There appears to be
302 * be a direct relationship to flags, so it's unnecessary to have it passed
* in separately.
*/
305 mask
= DRM_BO_MASK_MEM
;
306 mask
|= flags
& (DRM_BO_FLAG_READ
| DRM_BO_FLAG_WRITE
| DRM_BO_FLAG_EXE
);
308 err
= drmBOValidate(bufmgr_ttm
->fd
, &ttm_buf
->drm_bo
, 0, flags
, mask
, 0);
311 /* XXX: add to fence list for sanity checking */
/* Failure path: report which buffer could not be validated. */
313 fprintf(stderr
, "failed to validate buffer (%s): %s\n",
314 ttm_buf
->name
, strerror(-err
));
/* Validation may move the buffer; publish the new GPU offset. */
317 buf
->offset
= ttm_buf
->drm_bo
.offset
;
/* Debug trace; presumably #if BUFMGR_DEBUG guarded in the full file. */
320 fprintf(stderr
, "bo_validate: %p (%s)\n", &ttm_buf
->bo
, ttm_buf
->name
);
/* Creates a fence covering the buffers validated since the last fence, via
 * drmFenceBuffers, and returns it wrapped as a dri_fence.
 * NOTE(review): missing from this extraction are the rest of the parameter
 * list (original line ~328 — presumably a flush flag selecting between the
 * two fence types assigned below), the type/ret declarations, the
 * conditional around the two `type = ...` assignments, the malloc and
 * drmFenceBuffers error paths, and the refcount ordering — confirm in the
 * full file. */
327 dri_ttm_fence_validated(dri_bufmgr
*bufmgr
, const char *name
,
330 dri_fence_ttm
*fence_ttm
= malloc(sizeof(*fence_ttm
));
331 dri_bufmgr_ttm
*bufmgr_ttm
= (dri_bufmgr_ttm
*)bufmgr
;
/* Flush-required path uses the driver's flush fence class... */
339 type
= bufmgr_ttm
->fence_type_flush
;
/* ...otherwise the ordinary fence class. */
341 type
= bufmgr_ttm
->fence_type
;
343 fence_ttm
->refcount
= 1;
344 fence_ttm
->name
= name
;
345 fence_ttm
->fence
.bufmgr
= bufmgr
;
346 ret
= drmFenceBuffers(bufmgr_ttm
->fd
, type
, 0, &fence_ttm
->drm_fence
);
348 fprintf(stderr
, "failed to fence (%s): %s\n", name
, strerror(-ret
));
/* Debug trace; presumably #if BUFMGR_DEBUG guarded in the full file. */
354 fprintf(stderr
, "fence_validated: %p (%s)\n", &fence_ttm
->fence
,
358 return &fence_ttm
->fence
;
362 dri_ttm_fence_reference(dri_fence
*fence
)
364 dri_fence_ttm
*fence_ttm
= (dri_fence_ttm
*)fence
;
365 dri_bufmgr_ttm
*bufmgr_ttm
= (dri_bufmgr_ttm
*)fence
->bufmgr
;
367 _glthread_LOCK_MUTEX(bufmgr_ttm
->mutex
);
368 ++fence_ttm
->refcount
;
369 _glthread_UNLOCK_MUTEX(bufmgr_ttm
->mutex
);
/* Drops a reference on a fence; on the last reference the kernel fence is
 * destroyed via drmFenceDestroy.
 * NOTE(review): missing from this extraction are a NULL-fence guard, the
 * ret declaration, the error conditional around the "drmFenceDestroy failed"
 * message, and the free of the wrapper — confirm in the full file. */
373 dri_ttm_fence_unreference(dri_fence
*fence
)
375 dri_fence_ttm
*fence_ttm
= (dri_fence_ttm
*)fence
;
376 dri_bufmgr_ttm
*bufmgr_ttm
= (dri_bufmgr_ttm
*)fence
->bufmgr
;
381 _glthread_LOCK_MUTEX(bufmgr_ttm
->mutex
);
382 if (--fence_ttm
->refcount
== 0) {
385 /* XXX Having to use drmFenceDestroy as the opposite of drmFenceBuffers
386 * instead of simply unreferencing is madness, and leads to behaviors we
387 * may not want (making the fence unsharable). This behavior by the DRM
388 * ioctls should be fixed, and drmFenceDestroy eliminated.
*/
390 ret
= drmFenceDestroy(bufmgr_ttm
->fd
, &fence_ttm
->drm_fence
);
392 fprintf(stderr
, "drmFenceDestroy failed (%s): %s\n",
393 fence_ttm
->name
, strerror(-ret
));
396 _glthread_UNLOCK_MUTEX(bufmgr_ttm
->mutex
);
/* Non-final-unreference path releases the mutex here. */
400 _glthread_UNLOCK_MUTEX(bufmgr_ttm
->mutex
);
/* Blocks until the fence signals, via drmFenceWait under the bufmgr mutex.
 * NOTE(review): the ret declaration and the error conditional that
 * presumably guards the _mesa_printf below (original line ~413) are missing
 * from this extraction — confirm in the full file. */
404 dri_ttm_fence_wait(dri_fence
*fence
)
406 dri_fence_ttm
*fence_ttm
= (dri_fence_ttm
*)fence
;
407 dri_bufmgr_ttm
*bufmgr_ttm
= (dri_bufmgr_ttm
*)fence
->bufmgr
;
/* NOTE(review): holding the bufmgr mutex across a blocking kernel wait
 * serializes all other bufmgr operations for the duration. */
410 _glthread_LOCK_MUTEX(bufmgr_ttm
->mutex
);
411 ret
= drmFenceWait(bufmgr_ttm
->fd
, 0, &fence_ttm
->drm_fence
, 0);
412 _glthread_UNLOCK_MUTEX(bufmgr_ttm
->mutex
);
/* Error path: report the failed wait. */
414 _mesa_printf("%s:%d: Error %d waiting for fence %s.\n",
415 __FILE__
, __LINE__
, ret
, fence_ttm
->name
);
/* Debug trace; presumably #if BUFMGR_DEBUG guarded in the full file. */
420 fprintf(stderr
, "fence_wait: %p (%s)\n", &fence_ttm
->fence
,
/* Tears down the TTM buffer manager created by dri_bufmgr_ttm_init.
 * NOTE(review): the free of the bufmgr_ttm allocation (original line ~432)
 * is missing from this extraction — confirm it exists in the full file,
 * otherwise this leaks. */
426 dri_bufmgr_ttm_destroy(dri_bufmgr
*bufmgr
)
428 dri_bufmgr_ttm
*bufmgr_ttm
= (dri_bufmgr_ttm
*)bufmgr
;
430 _glthread_DESTROY_MUTEX(bufmgr_ttm
->mutex
);
/*
435 * Initializes the TTM buffer manager, which uses the kernel to allocate, map,
436 * and manage buffer objects.
*
438 * \param fd File descriptor of the opened DRM device.
439 * \param fence_type Driver-specific fence type used for fences with no flush.
440 * \param fence_type_flush Driver-specific fence type used for fences with a
* flush.
*/
/* NOTE(review): missing from this extraction are the malloc NULL check and
 * the assignment storing `fd` into bufmgr_ttm (original line ~450) — the
 * struct's fd member is read throughout this file, so confirm the
 * assignment exists in the full file. */
444 dri_bufmgr_ttm_init(int fd
, unsigned int fence_type
,
445 unsigned int fence_type_flush
)
447 dri_bufmgr_ttm
*bufmgr_ttm
;
449 bufmgr_ttm
= malloc(sizeof(*bufmgr_ttm
));
451 bufmgr_ttm
->fence_type
= fence_type
;
452 bufmgr_ttm
->fence_type_flush
= fence_type_flush
;
453 _glthread_INIT_MUTEX(bufmgr_ttm
->mutex
);
/* Wire the generic dri_bufmgr vtable to this backend's implementations. */
455 bufmgr_ttm
->bufmgr
.bo_alloc
= dri_ttm_alloc
;
456 bufmgr_ttm
->bufmgr
.bo_alloc_static
= dri_ttm_alloc_static
;
457 bufmgr_ttm
->bufmgr
.bo_reference
= dri_ttm_bo_reference
;
458 bufmgr_ttm
->bufmgr
.bo_unreference
= dri_ttm_bo_unreference
;
459 bufmgr_ttm
->bufmgr
.bo_map
= dri_ttm_bo_map
;
460 bufmgr_ttm
->bufmgr
.bo_unmap
= dri_ttm_bo_unmap
;
461 bufmgr_ttm
->bufmgr
.bo_validate
= dri_ttm_validate
;
462 bufmgr_ttm
->bufmgr
.fence_validated
= dri_ttm_fence_validated
;
463 bufmgr_ttm
->bufmgr
.fence_reference
= dri_ttm_fence_reference
;
464 bufmgr_ttm
->bufmgr
.fence_unreference
= dri_ttm_fence_unreference
;
465 bufmgr_ttm
->bufmgr
.fence_wait
= dri_ttm_fence_wait
;
466 bufmgr_ttm
->bufmgr
.destroy
= dri_bufmgr_ttm_destroy
;
/* Return the embedded generic interface; callers cast back as needed. */
468 return &bufmgr_ttm
->bufmgr
;