intel_context.c \
intel_lock.c \
intel_screen.c \
- intel_batchpool.c
+ ws_dri_bufmgr.c \
+ ws_dri_drmpool.c \
+ ws_dri_fencemgr.c \
+ ws_dri_mallocpool.c \
+ ws_dri_slabpool.c
C_SOURCES = \
$(COMMON_GALLIUM_SOURCES) \
- $(COMMON_BM_SOURCES) \
$(DRIVER_SOURCES)
ASM_SOURCES =
*
**************************************************************************/
-#include <errno.h>
-#include <stdio.h>
#include "intel_batchbuffer.h"
-#include "intel_context.h"
-#include "intel_screen.h"
-#include "intel_reg.h"
-#include "drm.h"
-
-/* Relocations in kernel space:
- * - pass dma buffer seperately
- * - memory manager knows how to patch
- * - pass list of dependent buffers
- * - pass relocation list
- *
- * Either:
- * - get back an offset for buffer to fire
- * - memory manager knows how to fire buffer
- *
- * Really want the buffer to be AGP and pinned.
- *
- */
-
-/* Cliprect fence: The highest fence protecting a dma buffer
- * containing explicit cliprect information. Like the old drawable
- * lock but irq-driven. X server must wait for this fence to expire
- * before changing cliprects [and then doing sw rendering?]. For
- * other dma buffers, the scheduler will grab current cliprect info
- * and mix into buffer. X server must hold the lock while changing
- * cliprects??? Make per-drawable. Need cliprects in shared memory
- * -- beats storing them with every cmd buffer in the queue.
- *
- * ==> X server must wait for this fence to expire before touching the
- * framebuffer with new cliprects.
- *
- * ==> Cliprect-dependent buffers associated with a
- * cliprect-timestamp. All of the buffers associated with a timestamp
- * must go to hardware before any buffer with a newer timestamp.
- *
- * ==> Dma should be queued per-drawable for correct X/GL
- * synchronization. Or can fences be used for this?
- *
- * Applies to: Blit operations, metaops, X server operations -- X
- * server automatically waits on its own dma to complete before
- * modifying cliprects ???
- */
+#include "intel_ioctl.h"
+#include <errno.h>
+#if 0
static void
-intel_dump_batchbuffer(uint offset, uint * ptr, uint count)
+intel_dump_batchbuffer(GLuint offset, GLuint * ptr, GLuint count)
{
int i;
- printf("\n\n\nSTART BATCH (%d dwords):\n", count / 4);
- for (i = 0; i < count / 4; i += 1)
- printf("\t0x%08x\n", ptr[i]);
- printf("END BATCH\n\n\n");
+ fprintf(stderr, "\n\n\nSTART BATCH (%d dwords):\n", count / 4);
+ for (i = 0; i < count / 4; i += 4)
+ fprintf(stderr, "0x%x:\t0x%08x 0x%08x 0x%08x 0x%08x\n",
+ offset + i * 4, ptr[i], ptr[i + 1], ptr[i + 2], ptr[i + 3]);
+ fprintf(stderr, "END BATCH\n\n\n");
+}
+#endif
+
+static void
+intel_realloc_relocs(struct intel_batchbuffer *batch, int num_relocs)
+{
+ unsigned long size = num_relocs * I915_RELOC0_STRIDE + I915_RELOC_HEADER;
+
+ size *= sizeof(uint32_t);
+ batch->reloc = realloc(batch->reloc, size);
+ batch->reloc_size = num_relocs;
}
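+
+/*
+ * Illustrative sizing note: with the I915_RELOC0_STRIDE and
+ * I915_RELOC_HEADER dword counts assumed to be 4 each (their values in
+ * the i915 DRM headers of this period), a request for
+ * INTEL_DEFAULT_RELOCS (100) records allocates
+ * (100 * 4 + 4) * sizeof(uint32_t) = 1616 bytes.
+ */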
void
intel_batchbuffer_reset(struct intel_batchbuffer *batch)
{
- int i;
-
- if (batch->map) {
- driBOUnmap(batch->buffer);
- batch->map = NULL;
- }
-
/*
* Get a new, free batchbuffer.
*/
- batch->size = BATCH_SZ;
- driBOData(batch->buffer, batch->size, NULL, 0);
+ drmBO *bo;
+ struct drm_bo_info_req *req;
+
+ driBOUnrefUserList(batch->list);
+ driBOResetList(batch->list);
- driBOResetList(&batch->list);
+ batch->size = 4096; // ZZZ JB batch->intel->intelScreen->maxBatchSize;
+ driBOData(batch->buffer, batch->size, NULL, NULL, 0);
/*
- * Unreference buffers previously on the relocation list.
+ * Add the batchbuffer to the validate list.
*/
- for (i = 0; i < batch->nr_relocs; i++) {
- struct buffer_reloc *r = &batch->reloc[i];
- driBOUnReference(r->buf);
- }
- batch->list_count = 0;
- batch->nr_relocs = 0;
- batch->flags = 0;
+ driBOAddListItem(batch->list, batch->buffer,
+ DRM_BO_FLAG_EXE | DRM_BO_FLAG_MEM_TT,
+ DRM_BO_FLAG_EXE | DRM_BO_MASK_MEM,
+ &batch->dest_location, &batch->node);
+
+ req = &batch->node->bo_arg.d.req.bo_req;
/*
- * We don't refcount the batchbuffer itself since we can't destroy it
- * while it's on the list.
+ * Set up information needed for us to make relocations
+ * relative to the underlying drm buffer objects.
*/
- driBOAddListItem(&batch->list, batch->buffer,
- DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE,
- DRM_BO_MASK_MEM | DRM_BO_FLAG_EXE);
+ driReadLockKernelBO();
+ bo = driBOKernel(batch->buffer);
+ req->presumed_offset = (uint64_t) bo->offset;
+ req->hint = DRM_BO_HINT_PRESUMED_OFFSET;
+ batch->drmBOVirtual = (uint8_t *) bo->virtual;
+ driReadUnlockKernelBO();
+ /*
+ * Adjust the relocation buffer size.
+ */
+
+ if (batch->reloc_size > INTEL_MAX_RELOCS ||
+ batch->reloc == NULL)
+ intel_realloc_relocs(batch, INTEL_DEFAULT_RELOCS);
+
+ assert(batch->reloc != NULL);
+ batch->reloc[0] = 0; /* No relocs yet. */
+ batch->reloc[1] = 1; /* Reloc type 1 */
+ batch->reloc[2] = 0; /* Only a single relocation list. */
+ batch->reloc[3] = 0; /* Only a single relocation list. */
batch->map = driBOMap(batch->buffer, DRM_BO_FLAG_WRITE, 0);
+ batch->poolOffset = driBOPoolOffset(batch->buffer);
batch->ptr = batch->map;
+ batch->dirty_state = ~0;
+ batch->nr_relocs = 0;
+ batch->flags = 0;
+ batch->id = 0; //batch->intel->intelScreen->batch_id++;
}
-
/*======================================================================
* Public functions
*/
&batch->buffer, 4096,
DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE, 0);
batch->last_fence = NULL;
- driBOCreateList(20, &batch->list);
+ batch->list = driBOCreateList(20);
+ batch->reloc = NULL;
intel_batchbuffer_reset(batch);
return batch;
}
-
void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
if (batch->last_fence) {
driFenceFinish(batch->last_fence,
- DRM_FENCE_TYPE_EXE | DRM_I915_FENCE_TYPE_RW,
- GL_FALSE);
- driFenceUnReference(batch->last_fence);
- batch->last_fence = NULL;
+ DRM_FENCE_TYPE_EXE, GL_FALSE);
+ driFenceUnReference(&batch->last_fence);
}
if (batch->map) {
driBOUnmap(batch->buffer);
batch->map = NULL;
}
driBOUnReference(batch->buffer);
+ driBOFreeList(batch->list);
+ if (batch->reloc)
+ free(batch->reloc);
batch->buffer = NULL;
free(batch);
}
+void
+intel_offset_relocation(struct intel_batchbuffer *batch,
+ unsigned pre_add,
+ struct _DriBufferObject *driBO,
+ uint64_t val_flags,
+ uint64_t val_mask)
+{
+ int itemLoc;
+ struct _drmBONode *node;
+ uint32_t *reloc;
+ struct drm_bo_info_req *req;
+
+ driBOAddListItem(batch->list, driBO, val_flags, val_mask,
+ &itemLoc, &node);
+ req = &node->bo_arg.d.req.bo_req;
+
+ if (!(req->hint & DRM_BO_HINT_PRESUMED_OFFSET)) {
+
+ /*
+ * Stop other threads from tampering with the underlying
+ * drmBO while we're reading its offset.
+ */
+
+ driReadLockKernelBO();
+ req->presumed_offset = (uint64_t) driBOKernel(driBO)->offset;
+ driReadUnlockKernelBO();
+ req->hint = DRM_BO_HINT_PRESUMED_OFFSET;
+ }
+
+ pre_add += driBOPoolOffset(driBO);
+
+ if (batch->nr_relocs == batch->reloc_size)
+ intel_realloc_relocs(batch, batch->reloc_size * 2);
+
+ reloc = batch->reloc +
+ (I915_RELOC_HEADER + batch->nr_relocs * I915_RELOC0_STRIDE);
+
+ reloc[0] = ((uint8_t *)batch->ptr - batch->drmBOVirtual);
+ intel_batchbuffer_emit_dword(batch, req->presumed_offset + pre_add);
+ reloc[1] = pre_add;
+ reloc[2] = itemLoc;
+ reloc[3] = batch->dest_location;
+ batch->nr_relocs++;
+}
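+
+/*
+ * Illustrative sketch of the four-dword record written above, plus a
+ * hypothetical caller (buffer name and flags invented for illustration):
+ *
+ *   reloc[0]: byte offset of the dword to patch within the batch mapping
+ *   reloc[1]: delta added to the target buffer's final offset (pre_add)
+ *   reloc[2]: validate-list index of the target buffer
+ *   reloc[3]: validate-list index of the batch buffer itself
+ *
+ *   intel_offset_relocation(batch, 0, vertex_buf,
+ *                           DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
+ *                           DRM_BO_MASK_MEM | DRM_BO_FLAG_READ);
+ */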
static void
-intel_batch_ioctl(struct intel_context *intel,
- uint start_offset, uint used, boolean allow_unlock)
+i915_drm_copy_reply(const struct drm_bo_info_rep * rep, drmBO * buf)
{
- drmI830BatchBuffer batch;
-
- batch.start = start_offset;
- batch.used = used;
- batch.cliprects = NULL; /* unused */
- batch.num_cliprects = 0;
- batch.DR1 = 0;
- batch.DR4 = 0; /* still need this ? */
-
- DBG(IOCTL, "%s: 0x%x..0x%x DR4: %x cliprects: %d\n",
- __FUNCTION__,
- batch.start,
- batch.start + batch.used * 4, batch.DR4, batch.num_cliprects);
-
- if (drmCommandWrite(intel->driFd, DRM_I830_BATCHBUFFER, &batch,
- sizeof(batch))) {
- printf("DRM_I830_BATCHBUFFER: %d\n", -errno);
- UNLOCK_HARDWARE(intel);
- exit(1);
- }
+ buf->handle = rep->handle;
+ buf->flags = rep->flags;
+ buf->size = rep->size;
+ buf->offset = rep->offset;
+ buf->mapHandle = rep->arg_handle;
+ buf->proposedFlags = rep->proposed_flags;
+ buf->start = rep->buffer_start;
+ buf->fenceFlags = rep->fence_flags;
+ buf->replyFlags = rep->rep_flags;
+ buf->pageAlignment = rep->page_alignment;
}
+static int
+i915_execbuf(struct intel_batchbuffer *batch,
+ GLuint used,
+ GLboolean ignore_cliprects,
+ drmBOList *list,
+ struct drm_i915_execbuffer *ea)
+{
+ struct intel_context *intel = batch->intel;
+ drmBONode *node;
+ drmMMListHead *l;
+ struct drm_i915_op_arg *arg, *first;
+ struct drm_bo_op_req *req;
+ struct drm_bo_info_rep *rep;
+ uint64_t *prevNext = NULL;
+ drmBO *buf;
+ int ret = 0;
+ uint32_t count = 0;
+
+ first = NULL;
+ for (l = list->list.next; l != &list->list; l = l->next) {
+ node = DRMLISTENTRY(drmBONode, l, head);
+
+ arg = &node->bo_arg;
+ req = &arg->d.req;
+
+ if (!first)
+ first = arg;
+
+ if (prevNext)
+ *prevNext = (unsigned long)arg;
+
+ prevNext = &arg->next;
+ req->bo_req.handle = node->buf->handle;
+ req->op = drm_bo_validate;
+ req->bo_req.flags = node->arg0;
+ req->bo_req.mask = node->arg1;
+ req->bo_req.hint |= 0;
+ count++;
+ }
+
+ memset(ea, 0, sizeof(*ea));
+ ea->num_buffers = count;
+ ea->batch.start = batch->poolOffset;
+ ea->batch.used = used;
+#if 0 /* ZZZ JB: no cliprects used */
+ ea->batch.cliprects = intel->pClipRects;
+ ea->batch.num_cliprects = ignore_cliprects ? 0 : intel->numClipRects;
+ ea->batch.DR1 = 0;
+ ea->batch.DR4 = ((((GLuint) intel->drawX) & 0xffff) |
+ (((GLuint) intel->drawY) << 16));
+#else
+ ea->batch.cliprects = NULL;
+ ea->batch.num_cliprects = 0;
+ ea->batch.DR1 = 0;
+ ea->batch.DR4 = 0;
+#endif
+ ea->fence_arg.flags = DRM_I915_FENCE_FLAG_FLUSHED;
+ ea->ops_list = (unsigned long) first;
+ first->reloc_ptr = (unsigned long) batch->reloc;
+ batch->reloc[0] = batch->nr_relocs;
+
+ //return -EFAULT;
+ do {
+ ret = drmCommandWriteRead(intel->driFd, DRM_I915_EXECBUFFER, ea,
+ sizeof(*ea));
+ } while (ret == -EAGAIN);
+
+ if (ret != 0)
+ return ret;
+
+ for (l = list->list.next; l != &list->list; l = l->next) {
+ node = DRMLISTENTRY(drmBONode, l, head);
+ arg = &node->bo_arg;
+ rep = &arg->d.rep.bo_info;
+
+ if (!arg->handled) {
+ return -EFAULT;
+ }
+ if (arg->d.rep.ret)
+ return arg->d.rep.ret;
+
+ buf = node->buf;
+ i915_drm_copy_reply(rep, buf);
+ }
+ return 0;
+}
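+
+/*
+ * Illustrative note: the loop above chains the per-buffer op args through
+ * their 64-bit "next" fields, building a singly linked list of user-space
+ * pointers for the kernel to walk. The same idiom for a plain array
+ * (array and count invented for illustration):
+ *
+ *   for (i = 0; i < count - 1; ++i)
+ *       args[i].next = (unsigned long) &args[i + 1];
+ *   args[count - 1].next = 0;
+ */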
/* TODO: Push this whole function into bufmgr.
*/
-static void
+static struct _DriFenceObject *
do_flush_locked(struct intel_batchbuffer *batch,
- uint used, boolean allow_unlock)
+ GLuint used,
+ GLboolean ignore_cliprects, GLboolean allow_unlock)
{
- uint *ptr;
- uint i, fenceFlags;
+ struct intel_context *intel = batch->intel;
struct _DriFenceObject *fo;
+ drmFence fence;
+ drmBOList *boList;
+ struct drm_i915_execbuffer ea;
+ int ret = 0;
+
+ driBOValidateUserList(batch->list);
+ boList = driGetdrmBOList(batch->list);
+
+#if 0 /* ZZZ JB Always run */
+ if (!(intel->numClipRects == 0 && !ignore_cliprects)) {
+#else
+ if (1) {
+#endif
+ ret = i915_execbuf(batch, used, ignore_cliprects, boList, &ea);
+ } else {
+ driPutdrmBOList(batch->list);
+ fo = NULL;
+ goto out;
+ }
+ driPutdrmBOList(batch->list);
+ if (ret)
+ abort();
- driBOValidateList(batch->intel->driFd, &batch->list);
-
- /* Apply the relocations. This nasty map indicates to me that the
- * whole task should be done internally by the memory manager, and
- * that dma buffers probably need to be pinned within agp space.
- */
- ptr = (uint *) driBOMap(batch->buffer, DRM_BO_FLAG_WRITE,
- DRM_BO_HINT_ALLOW_UNFENCED_MAP);
+ if (ea.fence_arg.error != 0) {
- for (i = 0; i < batch->nr_relocs; i++) {
- struct buffer_reloc *r = &batch->reloc[i];
+ /*
+ * The hardware has been idled by the kernel.
+ * Don't fence the driBOs.
+ */
- ptr[r->offset / 4] = driBOOffset(r->buf) + r->delta;
+ if (batch->last_fence)
+ driFenceUnReference(&batch->last_fence);
+#if 0 /* ZZZ JB: no _mesa_* funcs in gallium */
+ _mesa_printf("fence error\n");
+#endif
+ batch->last_fence = NULL;
+ fo = NULL;
+ goto out;
}
- if (0)
- intel_dump_batchbuffer(0, ptr, used);
+ fence.handle = ea.fence_arg.handle;
+ fence.fence_class = ea.fence_arg.fence_class;
+ fence.type = ea.fence_arg.type;
+ fence.flags = ea.fence_arg.flags;
+ fence.signaled = ea.fence_arg.signaled;
- driBOUnmap(batch->buffer);
- batch->map = NULL;
-
- intel_batch_ioctl(batch->intel,
- driBOOffset(batch->buffer),
- used, allow_unlock);
-
- /*
- * Kernel fencing. The flags tells the kernel that we've
- * programmed an MI_FLUSH.
- */
- fenceFlags = DRM_I915_FENCE_FLAG_FLUSHED;
- fo = driFenceBuffers(batch->intel->driFd, "Batch fence", fenceFlags);
+ fo = driBOFenceUserList(batch->intel->intelScreen->mgr, batch->list,
+ "SuperFence", &fence);
+ if (driFenceType(fo) & DRM_I915_FENCE_TYPE_RW) {
+ if (batch->last_fence)
+ driFenceUnReference(&batch->last_fence);
/*
- * User space fencing.
- */
- driBOFence(batch->buffer, fo);
-
- if (driFenceType(fo) == DRM_FENCE_TYPE_EXE) {
- /*
- * Oops. We only validated a batch buffer. This means we
- * didn't do any proper rendering. Discard this fence object.
- */
- driFenceUnReference(fo);
- }
- else {
- driFenceUnReference(batch->last_fence);
- batch->last_fence = fo;
- for (i = 0; i < batch->nr_relocs; i++) {
- struct buffer_reloc *r = &batch->reloc[i];
- driBOFence(r->buf, fo);
- }
- }
+ * FIXME: Context last fence??
+ */
+ batch->last_fence = fo;
+ driFenceReference(fo);
+ }
+ out:
+#if 0 /* ZZZ JB: fix this */
+ intel->vtbl.lost_hardware(intel);
+#else
+ (void)intel;
+#endif
+ return fo;
}
intel_batchbuffer_flush(struct intel_batchbuffer *batch)
{
struct intel_context *intel = batch->intel;
- uint used = batch->ptr - batch->map;
- const boolean was_locked = intel->locked;
+ GLuint used = batch->ptr - batch->map;
+ GLboolean was_locked = intel->locked;
+ struct _DriFenceObject *fence;
if (used == 0)
return batch->last_fence;
-#define MI_FLUSH ((0 << 29) | (4 << 23))
-
/* Add the MI_BATCH_BUFFER_END. Always add an MI_FLUSH - this is a
* performance drain that we would like to avoid.
*/
+#if 0 /* ZZZ JB: what should we do here? */
if (used & 4) {
- ((int *) batch->ptr)[0] = MI_FLUSH;
+ ((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
((int *) batch->ptr)[1] = 0;
((int *) batch->ptr)[2] = MI_BATCH_BUFFER_END;
used += 12;
}
else {
- ((int *) batch->ptr)[0] = MI_FLUSH;
+ ((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
((int *) batch->ptr)[1] = MI_BATCH_BUFFER_END;
used += 8;
}
-
+#else
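+ /* The batch tail must end on a quadword (8-byte) boundary, so a zero
+ * dword pads the tail before MI_BATCH_BUFFER_END when "used" is only
+ * dword-aligned.
+ */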
+ if (used & 4) {
+ ((int *) batch->ptr)[0] = ((0<<29)|(4<<23)); // MI_FLUSH;
+ ((int *) batch->ptr)[1] = 0;
+ ((int *) batch->ptr)[2] = (0xA<<23); // MI_BATCH_BUFFER_END;
+ used += 12;
+ }
+ else {
+ ((int *) batch->ptr)[0] = ((0<<29)|(4<<23)); // MI_FLUSH;
+ ((int *) batch->ptr)[1] = (0xA<<23); // MI_BATCH_BUFFER_END;
+ used += 8;
+ }
+#endif
driBOUnmap(batch->buffer);
batch->ptr = NULL;
batch->map = NULL;
if (!was_locked)
LOCK_HARDWARE(intel);
- do_flush_locked(batch, used, GL_FALSE);
+ fence = do_flush_locked(batch, used, !(batch->flags & INTEL_BATCH_CLIPRECTS),
+ GL_FALSE);
if (!was_locked)
UNLOCK_HARDWARE(intel);
/* Reset the buffer:
*/
intel_batchbuffer_reset(batch);
- return batch->last_fence;
+ return fence;
}
-
void
intel_batchbuffer_finish(struct intel_batchbuffer *batch)
{
struct _DriFenceObject *fence = intel_batchbuffer_flush(batch);
- if (fence) {
- driFenceReference(fence);
- driFenceFinish(fence,
- DRM_FENCE_TYPE_EXE | DRM_I915_FENCE_TYPE_RW,
- GL_FALSE);
- driFenceUnReference(fence);
- }
+ driFenceFinish(fence, driFenceType(fence), GL_FALSE);
+ driFenceUnReference(&fence);
}
-
-/* This is the only way buffers get added to the validate list.
- */
-boolean
-intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
- struct _DriBufferObject *buffer,
- uint flags, uint mask, uint delta)
-{
- assert(batch->nr_relocs < MAX_RELOCS);
-
- driBOAddListItem(&batch->list, buffer, flags, mask);
-
- {
- struct buffer_reloc *r = &batch->reloc[batch->nr_relocs++];
- driBOReference(buffer);
- r->buf = buffer;
- r->offset = batch->ptr - batch->map;
- r->delta = delta;
- *(uint *) batch->ptr = 0x12345678;
- }
-
- batch->ptr += 4;
- return GL_TRUE;
-}
-
-
void
intel_batchbuffer_data(struct intel_batchbuffer *batch,
- const void *data, uint bytes, uint flags)
+ const void *data, GLuint bytes, GLuint flags)
{
assert((bytes & 3) == 0);
intel_batchbuffer_require_space(batch, bytes, flags);
-/**************************************************************************
- *
- * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H
-#include "pipe/p_debug.h"
-#include "pipe/p_compiler.h"
-#include "dri_bufmgr.h"
+#include "mtypes.h"
+#include "ws_dri_bufmgr.h"
struct intel_context;
#define BATCH_SZ 16384
#define BATCH_RESERVED 16
-#define MAX_RELOCS 4096
+#define INTEL_DEFAULT_RELOCS 100
+#define INTEL_MAX_RELOCS 400
#define INTEL_BATCH_NO_CLIPRECTS 0x1
#define INTEL_BATCH_CLIPRECTS 0x2
-struct buffer_reloc
-{
- struct _DriBufferObject *buf;
- uint offset;
- uint delta; /* not needed? */
-};
-
struct intel_batchbuffer
{
struct bufmgr *bm;
struct _DriBufferObject *buffer;
struct _DriFenceObject *last_fence;
- uint flags;
+ GLuint flags;
- drmBOList list;
- uint list_count;
- ubyte *map;
- ubyte *ptr;
+ struct _DriBufferList *list;
+ GLuint list_count;
+ GLubyte *map;
+ GLubyte *ptr;
- struct buffer_reloc reloc[MAX_RELOCS];
- uint nr_relocs;
- uint size;
+ uint32_t *reloc;
+ GLuint reloc_size;
+ GLuint nr_relocs;
+
+ GLuint size;
+
+ GLuint dirty_state;
+ GLuint id;
+
+ uint32_t poolOffset;
+ uint8_t *drmBOVirtual;
+ struct _drmBONode *node; /* Validation list node for this buffer */
+ int dest_location; /* Validation list sequence for this buffer */
};
-struct intel_batchbuffer *intel_batchbuffer_alloc(struct intel_context *intel);
+struct intel_batchbuffer *intel_batchbuffer_alloc(struct intel_context
+ *intel);
void intel_batchbuffer_free(struct intel_batchbuffer *batch);
/* Unlike bmBufferData, this currently requires the buffer be mapped.
- * Consider it a convenience function wrapping multiple
+ * Consider it a convenience function wrapping multiple
* intel_buffer_dword() calls.
*/
void intel_batchbuffer_data(struct intel_batchbuffer *batch,
- const void *data, uint bytes, uint flags);
+ const void *data, GLuint bytes, GLuint flags);
void intel_batchbuffer_release_space(struct intel_batchbuffer *batch,
- uint bytes);
+ GLuint bytes);
-boolean intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
- struct _DriBufferObject *buffer,
- uint flags,
- uint mask, uint offset);
+void
+intel_offset_relocation(struct intel_batchbuffer *batch,
+ unsigned pre_add,
+ struct _DriBufferObject *driBO,
+ uint64_t val_flags,
+ uint64_t val_mask);
/* Inline functions - might actually be better off with these
* non-inlined. Certainly better off switching all command packets to
* be passed as structs rather than dwords, but that's a little bit of
* work...
*/
-static INLINE uint
+static INLINE GLuint
intel_batchbuffer_space(struct intel_batchbuffer *batch)
{
return (batch->size - BATCH_RESERVED) - (batch->ptr - batch->map);
static INLINE void
-intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, uint dword)
+intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, GLuint dword)
{
assert(batch->map);
assert(intel_batchbuffer_space(batch) >= 4);
- *(uint *) (batch->ptr) = dword;
+ *(GLuint *) (batch->ptr) = dword;
batch->ptr += 4;
}
static INLINE void
intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
- uint sz, uint flags)
+ GLuint sz, GLuint flags)
{
+ struct _DriFenceObject *fence;
+
assert(sz < batch->size - 8);
if (intel_batchbuffer_space(batch) < sz ||
- (batch->flags != 0 && flags != 0 && batch->flags != flags))
- intel_batchbuffer_flush(batch);
+ (batch->flags != 0 && flags != 0 && batch->flags != flags)) {
+ fence = intel_batchbuffer_flush(batch);
+ driFenceUnReference(&fence);
+ }
batch->flags |= flags;
}
#define BATCH_LOCALS
#define BEGIN_BATCH(n, flags) do { \
+ assert(!intel->prim.flush); \
intel_batchbuffer_require_space(intel->batch, (n)*4, flags); \
} while (0)
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel->batch, d)
#define OUT_RELOC(buf,flags,mask,delta) do { \
- assert((delta) >= 0); \
- intel_batchbuffer_emit_reloc(intel->batch, buf, flags, mask, delta); \
+ assert((delta) >= 0); \
+ intel_offset_relocation(intel->batch, delta, buf, flags, mask); \
} while (0)
#define ADVANCE_BATCH() do { } while(0)
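+
+/*
+ * Illustrative sketch: a hypothetical emit sequence using these macros
+ * (opcode and buffer invented for illustration; assumes an in-scope
+ * "intel" context pointer, as the macros do):
+ *
+ *   BEGIN_BATCH(3, INTEL_BATCH_NO_CLIPRECTS);
+ *   OUT_BATCH(CMD_EXAMPLE);             // hypothetical opcode dword
+ *   OUT_BATCH(0);                       // payload dword
+ *   OUT_RELOC(dst_buf, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
+ *             DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE, 0);
+ *   ADVANCE_BATCH();
+ */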
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-/**
- * XXX NOTE: there are no intel dependencies in this file.
- * Rename to dri_batchpool.c?
- */
-
-#include <xf86drm.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <errno.h>
-
-#include "pipe/p_compiler.h"
-#include "pipe/p_thread.h"
-
-#include "dri_bufpool.h"
-#include "dri_bufmgr.h"
-#include "intel_batchpool.h"
-
-
-typedef struct
-{
- drmMMListHead head;
- struct _BPool *parent;
- struct _DriFenceObject *fence;
- unsigned long start;
- int unfenced;
- int mapped;
-} BBuf;
-
-typedef struct _BPool
-{
- _glthread_Mutex mutex;
- unsigned long bufSize;
- unsigned poolSize;
- unsigned numFree;
- unsigned numTot;
- unsigned numDelayed;
- unsigned checkDelayed;
- drmMMListHead free;
- drmMMListHead delayed;
- drmMMListHead head;
- drmBO kernelBO;
- void *virtual;
- BBuf *bufs;
-} BPool;
-
-
-static BPool *
-createBPool(int fd, unsigned long bufSize, unsigned numBufs, unsigned flags,
- unsigned checkDelayed)
-{
- BPool *p = (BPool *) malloc(sizeof(*p));
- BBuf *buf;
- int i;
-
- if (!p)
- return NULL;
-
- p->bufs = (BBuf *) malloc(numBufs * sizeof(*p->bufs));
- if (!p->bufs) {
- free(p);
- return NULL;
- }
-
- DRMINITLISTHEAD(&p->free);
- DRMINITLISTHEAD(&p->head);
- DRMINITLISTHEAD(&p->delayed);
-
- p->numTot = numBufs;
- p->numFree = numBufs;
- p->bufSize = bufSize;
- p->numDelayed = 0;
- p->checkDelayed = checkDelayed;
-
- _glthread_INIT_MUTEX(p->mutex);
-
- if (drmBOCreate(fd, 0, numBufs * bufSize, 0, NULL, drm_bo_type_dc,
- flags, DRM_BO_HINT_DONT_FENCE, &p->kernelBO)) {
- free(p->bufs);
- free(p);
- return NULL;
- }
- if (drmBOMap(fd, &p->kernelBO, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0,
- &p->virtual)) {
- drmBODestroy(fd, &p->kernelBO);
- free(p->bufs);
- free(p);
- return NULL;
- }
-
- /*
- * We unmap the buffer so that we can validate it later. Note that this is
- * just a synchronizing operation. The buffer will have a virtual mapping
- * until it is destroyed.
- */
-
- drmBOUnmap(fd, &p->kernelBO);
-
- buf = p->bufs;
- for (i = 0; i < numBufs; ++i) {
- buf->parent = p;
- buf->fence = NULL;
- buf->start = i * bufSize;
- buf->mapped = 0;
- buf->unfenced = 0;
- DRMLISTADDTAIL(&buf->head, &p->free);
- buf++;
- }
-
- return p;
-}
-
-
-static void
-pool_checkFree(BPool * p, int wait)
-{
- drmMMListHead *list, *prev;
- BBuf *buf;
- int signaled = 0;
- int i;
-
- list = p->delayed.next;
-
- if (p->numDelayed > 3) {
- for (i = 0; i < p->numDelayed; i += 3) {
- list = list->next;
- }
- }
-
- prev = list->prev;
- for (; list != &p->delayed; list = prev, prev = list->prev) {
-
- buf = DRMLISTENTRY(BBuf, list, head);
-
- if (!signaled) {
- if (wait) {
- driFenceFinish(buf->fence, DRM_FENCE_TYPE_EXE, 1);
- signaled = 1;
- }
- else {
- signaled = driFenceSignaled(buf->fence, DRM_FENCE_TYPE_EXE);
- }
- }
-
- if (!signaled)
- break;
-
- driFenceUnReference(buf->fence);
- buf->fence = NULL;
- DRMLISTDEL(list);
- p->numDelayed--;
- DRMLISTADD(list, &p->free);
- p->numFree++;
- }
-}
-
-static void *
-pool_create(struct _DriBufferPool *pool,
- unsigned long size, unsigned flags, unsigned hint,
- unsigned alignment)
-{
- BPool *p = (BPool *) pool->data;
-
- drmMMListHead *item;
-
- if (alignment && (alignment != 4096))
- return NULL;
-
- _glthread_LOCK_MUTEX(p->mutex);
-
- if (p->numFree == 0)
- pool_checkFree(p, TRUE);
-
- if (p->numFree == 0) {
- fprintf(stderr, "Out of fixed size buffer objects\n");
- BM_CKFATAL(-ENOMEM);
- }
-
- item = p->free.next;
-
- if (item == &p->free) {
- fprintf(stderr, "Fixed size buffer pool corruption\n");
- }
-
- DRMLISTDEL(item);
- --p->numFree;
-
- _glthread_UNLOCK_MUTEX(p->mutex);
- return (void *) DRMLISTENTRY(BBuf, item, head);
-}
-
-
-static int
-pool_destroy(struct _DriBufferPool *pool, void *private)
-{
- BBuf *buf = (BBuf *) private;
- BPool *p = buf->parent;
-
- _glthread_LOCK_MUTEX(p->mutex);
-
- if (buf->fence) {
- DRMLISTADDTAIL(&buf->head, &p->delayed);
- p->numDelayed++;
- }
- else {
- buf->unfenced = 0;
- DRMLISTADD(&buf->head, &p->free);
- p->numFree++;
- }
-
- if ((p->numDelayed % p->checkDelayed) == 0)
- pool_checkFree(p, 0);
-
- _glthread_UNLOCK_MUTEX(p->mutex);
- return 0;
-}
-
-
-static int
-pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
- int hint, void **virtual)
-{
-
- BBuf *buf = (BBuf *) private;
- BPool *p = buf->parent;
-
- _glthread_LOCK_MUTEX(p->mutex);
-
- /*
- * Currently Mesa doesn't have any condition variables to resolve this
- * cleanly in a multithreading environment.
- * We bail out instead.
- */
-
- if (buf->mapped) {
- fprintf(stderr, "Trying to map already mapped buffer object\n");
- BM_CKFATAL(-EINVAL);
- }
-
-#if 0
- if (buf->unfenced && !(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
- fprintf(stderr, "Trying to map an unfenced buffer object 0x%08x"
- " 0x%08x %d\n", hint, flags, buf->start);
- BM_CKFATAL(-EINVAL);
- }
-
-#endif
-
- if (buf->fence) {
- _glthread_UNLOCK_MUTEX(p->mutex);
- return -EBUSY;
- }
-
- buf->mapped = TRUE;
- *virtual = (unsigned char *) p->virtual + buf->start;
- _glthread_UNLOCK_MUTEX(p->mutex);
- return 0;
-}
-
-static int
-pool_waitIdle(struct _DriBufferPool *pool, void *private, int lazy)
-{
- BBuf *buf = (BBuf *) private;
- driFenceFinish(buf->fence, 0x0, lazy);
- return 0;
-}
-
-static int
-pool_unmap(struct _DriBufferPool *pool, void *private)
-{
- BBuf *buf = (BBuf *) private;
-
- buf->mapped = 0;
- return 0;
-}
-
-static unsigned long
-pool_offset(struct _DriBufferPool *pool, void *private)
-{
- BBuf *buf = (BBuf *) private;
- BPool *p = buf->parent;
-
- return p->kernelBO.offset + buf->start;
-}
-
-static unsigned
-pool_flags(struct _DriBufferPool *pool, void *private)
-{
- BPool *p = (BPool *) pool->data;
-
- return p->kernelBO.flags;
-}
-
-static unsigned long
-pool_size(struct _DriBufferPool *pool, void *private)
-{
- BPool *p = (BPool *) pool->data;
-
- return p->bufSize;
-}
-
-
-static int
-pool_fence(struct _DriBufferPool *pool, void *private,
- struct _DriFenceObject *fence)
-{
- BBuf *buf = (BBuf *) private;
- BPool *p = buf->parent;
-
- _glthread_LOCK_MUTEX(p->mutex);
- if (buf->fence) {
- driFenceUnReference(buf->fence);
- }
- buf->fence = fence;
- buf->unfenced = 0;
- driFenceReference(buf->fence);
- _glthread_UNLOCK_MUTEX(p->mutex);
-
- return 0;
-}
-
-static drmBO *
-pool_kernel(struct _DriBufferPool *pool, void *private)
-{
- BBuf *buf = (BBuf *) private;
- BPool *p = buf->parent;
-
- return &p->kernelBO;
-}
-
-static int
-pool_validate(struct _DriBufferPool *pool, void *private)
-{
- BBuf *buf = (BBuf *) private;
- BPool *p = buf->parent;
- _glthread_LOCK_MUTEX(p->mutex);
- buf->unfenced = TRUE;
- _glthread_UNLOCK_MUTEX(p->mutex);
- return 0;
-}
-
-static void
-pool_takedown(struct _DriBufferPool *pool)
-{
- BPool *p = (BPool *) pool->data;
-
- /*
- * Wait on outstanding fences.
- */
-
- _glthread_LOCK_MUTEX(p->mutex);
- while ((p->numFree < p->numTot) && p->numDelayed) {
- _glthread_UNLOCK_MUTEX(p->mutex);
- sched_yield();
- pool_checkFree(p, TRUE);
- _glthread_LOCK_MUTEX(p->mutex);
- }
-
- drmBODestroy(pool->fd, &p->kernelBO);
- free(p->bufs);
- _glthread_UNLOCK_MUTEX(p->mutex);
- free(p);
- free(pool);
-}
-
-
-struct _DriBufferPool *
-driBatchPoolInit(int fd, unsigned flags,
- unsigned long bufSize,
- unsigned numBufs, unsigned checkDelayed)
-{
- struct _DriBufferPool *pool;
-
- pool = (struct _DriBufferPool *) malloc(sizeof(*pool));
- if (!pool)
- return NULL;
-
- pool->data = createBPool(fd, bufSize, numBufs, flags, checkDelayed);
- if (!pool->data)
- return NULL;
-
- pool->fd = fd;
- pool->map = &pool_map;
- pool->unmap = &pool_unmap;
- pool->destroy = &pool_destroy;
- pool->offset = &pool_offset;
- pool->flags = &pool_flags;
- pool->size = &pool_size;
- pool->create = &pool_create;
- pool->fence = &pool_fence;
- pool->kernel = &pool_kernel;
- pool->validate = &pool_validate;
- pool->waitIdle = &pool_waitIdle;
- pool->setstatic = NULL;
- pool->takeDown = &pool_takedown;
- return pool;
-}
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#ifndef INTEL_BATCHPOOL_H
-#define INTEL_BATCHPOOL_H
-
-extern struct _DriBufferPool *driBatchPoolInit(int fd, unsigned flags,
- unsigned long bufSize,
- unsigned numBufs,
- unsigned checkDelayed);
-
-
-#endif /* INTEL_BATCHPOOL_H */
* memory pools
*/
DRM_LIGHT_LOCK(sPriv->fd, &sPriv->pSAREA->lock, driContextPriv->hHWContext);
+ // ZZZ JB should be per screen and not be done per context
havePools = intelCreatePools(sPriv);
DRM_UNLOCK(sPriv->fd, &sPriv->pSAREA->lock, driContextPriv->hHWContext);
if (!havePools)
if (intel->last_swap_fence) {
driFenceFinish(intel->last_swap_fence, DRM_FENCE_TYPE_EXE, GL_TRUE);
- driFenceUnReference(intel->last_swap_fence);
+ driFenceUnReference(&intel->last_swap_fence);
intel->last_swap_fence = NULL;
}
if (intel->first_swap_fence) {
driFenceFinish(intel->first_swap_fence, DRM_FENCE_TYPE_EXE, GL_TRUE);
- driFenceUnReference(intel->first_swap_fence);
+ driFenceUnReference(&intel->first_swap_fence);
intel->first_swap_fence = NULL;
}
#ifndef INTEL_CONTEXT_H
#define INTEL_CONTEXT_H
-
+#include <stdint.h>
#include "drm.h"
#include "pipe/p_debug.h"
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include <stdio.h>
+#include <unistd.h>
+#include <errno.h>
+#include <sched.h>
+
+#include "mtypes.h"
+#include "context.h"
+#include "swrast/swrast.h"
+
+#include "intel_context.h"
+#include "intel_ioctl.h"
+#include "intel_batchbuffer.h"
+#include "intel_blit.h"
+#include "intel_regions.h"
+#include "drm.h"
+
+#define FILE_DEBUG_FLAG DEBUG_IOCTL
+
+int
+intelEmitIrqLocked(struct intel_context *intel)
+{
+ drmI830IrqEmit ie;
+ int ret, seq;
+
+ assert(((*(int *) intel->driHwLock) & ~DRM_LOCK_CONT) ==
+ (DRM_LOCK_HELD | intel->hHWContext));
+
+ ie.irq_seq = &seq;
+
+ ret = drmCommandWriteRead(intel->driFd, DRM_I830_IRQ_EMIT,
+ &ie, sizeof(ie));
+ if (ret) {
+ fprintf(stderr, "%s: drmI830IrqEmit: %d\n", __FUNCTION__, ret);
+ exit(1);
+ }
+
+ DBG("%s --> %d\n", __FUNCTION__, seq);
+
+ return seq;
+}
+
+void
+intelWaitIrq(struct intel_context *intel, int seq)
+{
+ int ret;
+
+ DBG("%s %d\n", __FUNCTION__, seq);
+
+ intel->iw.irq_seq = seq;
+
+ do {
+ ret =
+ drmCommandWrite(intel->driFd, DRM_I830_IRQ_WAIT, &intel->iw,
+ sizeof(intel->iw));
+ } while (ret == -EAGAIN || ret == -EINTR);
+
+ if (ret) {
+ fprintf(stderr, "%s: drmI830IrqWait: %d\n", __FUNCTION__, ret);
+ exit(1);
+ }
+}
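+
+/*
+ * Illustrative sketch: the two calls above pair up as emit-then-wait,
+ * with the emit done under the hardware lock (per the assert in
+ * intelEmitIrqLocked()):
+ *
+ *   LOCK_HARDWARE(intel);
+ *   seq = intelEmitIrqLocked(intel);
+ *   UNLOCK_HARDWARE(intel);
+ *   intelWaitIrq(intel, seq);
+ */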
+
+
+void
+intel_batch_ioctl(struct intel_context *intel,
+ GLuint start_offset,
+ GLuint used,
+ GLboolean ignore_cliprects, GLboolean allow_unlock)
+{
+ drmI830BatchBuffer batch;
+
+ assert(intel->locked);
+ assert(used);
+
+ DBG("%s used %d offset %x..%x ignore_cliprects %d\n",
+ __FUNCTION__,
+ used, start_offset, start_offset + used, ignore_cliprects);
+
+ /* Throw away non-effective packets. Won't work once we have
+ * hardware contexts which would preserve state changes beyond a
+ * single buffer.
+ */
+
+ batch.start = start_offset;
+ batch.used = used;
+ batch.cliprects = intel->pClipRects;
+ batch.num_cliprects = ignore_cliprects ? 0 : intel->numClipRects;
+ batch.DR1 = 0;
+ batch.DR4 = ((((GLuint) intel->drawX) & 0xffff) |
+ (((GLuint) intel->drawY) << 16));
+
+ DBG("%s: 0x%x..0x%x DR4: %x cliprects: %d\n",
+ __FUNCTION__,
+ batch.start,
+ batch.start + batch.used * 4, batch.DR4, batch.num_cliprects);
+
+ if (drmCommandWrite(intel->driFd, DRM_I830_BATCHBUFFER, &batch,
+ sizeof(batch))) {
+ fprintf(stderr, "DRM_I830_BATCHBUFFER: %d\n", -errno);
+ UNLOCK_HARDWARE(intel);
+ exit(1);
+ }
+
+ /* FIXME: use hardware contexts to avoid 'losing' hardware after
+ * each buffer flush.
+ */
+ intel->vtbl.lost_hardware(intel);
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef INTEL_IOCTL_H
+#define INTEL_IOCTL_H
+
+#include "intel_context.h"
+
+void intelWaitIrq(struct intel_context *intel, int seq);
+int intelEmitIrqLocked(struct intel_context *intel);
+
+void intel_batch_ioctl(struct intel_context *intel,
+ GLuint start_offset,
+ GLuint used,
+ GLboolean ignore_cliprects, GLboolean allow_unlock);
+#endif
#include "intel_context.h"
#include "intel_screen.h"
#include "intel_batchbuffer.h"
-#include "intel_batchpool.h"
+//#include "intel_batchpool.h"
#include "intel_swapbuffers.h"
#include "intel_winsys.h"
#include "i830_dri.h"
-#include "dri_bufpool.h"
+#include "ws_dri_bufpool.h"
#include "pipe/p_context.h"
#include "state_tracker/st_public.h"
assert( sarea->front_size >=
intelScreen->front.pitch * intelScreen->front.height );
+#if 0 /* JB not important */
if (!sarea->front_handle)
return;
fprintf(stderr, "drmMap(frontbuffer) failed!\n");
return;
}
+#endif
+#if 0 /* JB */
if (intelScreen->staticPool) {
driGenBuffers(intelScreen->staticPool, "static region", 1,
&intelScreen->front.buffer, 64,
DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_NO_MOVE |
DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0);
-
+
driBOSetStatic(intelScreen->front.buffer,
intelScreen->front.offset,
intelScreen->front.pitch * intelScreen->front.height,
intelScreen->front.map, 0);
}
+#else
+ if (intelScreen->staticPool) {
+ if (intelScreen->front.buffer)
+ driBOUnReference(intelScreen->front.buffer);
+ driGenBuffers(intelScreen->staticPool, "front", 1, &intelScreen->front.buffer, 0, 0, 0);
+ driBOSetReferenced(intelScreen->front.buffer, sarea->front_bo_handle);
+ }
+#endif
}
boolean
intelCreatePools(__DRIscreenPrivate * sPriv)
{
- unsigned batchPoolSize = 1024*1024;
+ //unsigned batchPoolSize = 1024*1024;
struct intel_screen *intelScreen = intel_screen(sPriv);
if (intelScreen->havePools)
return GL_TRUE;
+#if 0 /* ZZZ JB fix this */
intelScreen->staticPool = driDRMStaticPoolInit(sPriv->fd);
if (!intelScreen->staticPool)
return GL_FALSE;
fprintf(stderr, "Failed to initialize batch pool - possible incorrect agpgart installed\n");
return GL_FALSE;
}
-
+#else
+ intelScreen->staticPool = driDRMPoolInit(sPriv->fd);
+ intelScreen->batchPool = driSlabPoolInit(sPriv->fd,
+ DRM_BO_FLAG_EXE |
+ DRM_BO_FLAG_MEM_TT,
+ DRM_BO_FLAG_EXE |
+ DRM_BO_FLAG_MEM_TT,
+ 4096, //intelScreen->maxBatchSize,
+ 1, 40, 16*16384, 0,
+ intelScreen->fMan);
+#endif
intelScreen->havePools = GL_TRUE;
intelUpdateScreenRotation(sPriv, intelScreen->sarea);
(*glx_enable_extension) (psc, "GLX_SGI_make_current_read");
}
- intelScreen->winsys = intel_create_pipe_winsys(sPriv->fd);
+
+
+#if 1 // ZZZ JB
+ intelScreen->mgr = driFenceMgrTTMInit(sPriv->fd);
+ if (!intelScreen->mgr) {
+ fprintf(stderr, "Failed to create fence manager.\n");
+ return GL_FALSE;
+ }
+
+ intelScreen->fMan = driInitFreeSlabManager(10, 10);
+ if (!intelScreen->fMan) {
+ fprintf(stderr, "Failed to create free slab manager.\n");
+ return GL_FALSE;
+ }
+
+ if (!intelCreatePools(sPriv))
+ return GL_FALSE;
+#endif
+
+ intelScreen->winsys = intel_create_pipe_winsys(sPriv->fd, intelScreen->fMan);
return GL_TRUE;
}
#include "dri_util.h"
#include "i830_common.h"
#include "xmlconfig.h"
-#include "dri_bufpool.h"
+#include "ws_dri_bufpool.h"
#include "pipe/p_compiler.h"
*/
struct intel_context *dummyContext;
+ /*
+ * New stuff from the i915tex integration
+ */
+ struct _DriFenceMgr *mgr;
+ struct _DriFreeSlabManager *fMan;
+ unsigned batch_id;
+
struct pipe_winsys *winsys;
};
if (intel->last_swap_fence) {
driFenceFinish(intel->last_swap_fence, DRM_FENCE_TYPE_EXE, TRUE);
- driFenceUnReference(intel->last_swap_fence);
+ driFenceUnReference(&intel->last_swap_fence);
intel->last_swap_fence = NULL;
}
intel->last_swap_fence = intel->first_swap_fence;
}
if (intel->first_swap_fence)
- driFenceUnReference(intel->first_swap_fence);
+ driFenceUnReference(&intel->first_swap_fence);
intel->first_swap_fence = intel_batchbuffer_flush(intel->batch);
driFenceReference(intel->first_swap_fence);
}
struct _DriBufferObject;
struct pipe_winsys *
-intel_create_pipe_winsys( int fd );
+intel_create_pipe_winsys( int fd, struct _DriFreeSlabManager *fMan );
void
intel_destroy_pipe_winsys( struct pipe_winsys *winsys );
#include <stdlib.h>
#include <xf86drm.h>
-#include "dri_bufpool.h"
-#include "dri_bufmgr.h"
+#include "ws_dri_bufpool.h"
+#include "ws_dri_bufmgr.h"
#include "intel_context.h"
#include "intel_batchbuffer.h"
mask |= DRM_BO_FLAG_READ;
}
- intel_batchbuffer_emit_reloc( intel->batch,
+#if 0 /* JB old */
+ intel_batchbuffer_emit_reloc( intel->batch,
dri_bo( buf ),
- flags, mask,
+ flags, mask,
delta );
+#else /* new */
+ intel_offset_relocation( intel->batch,
+ delta,
+ dri_bo( buf ),
+ flags,
+ mask );
+#endif
}
fu.dri = intel_batchbuffer_flush( intel->batch );
- if (fu.dri)
- iws->pws->fence_reference(iws->pws, fence, fu.pipe);
+ if (!fu.dri && fence) {
+ *fence = NULL;
+ return;
+ }
+
+ if (fu.dri) {
+ if (fence)
+ *fence = fu.pipe;
+ else
+ iws->pws->fence_reference(iws->pws, &fu.dri, NULL);
+ }
+
// if (0) intel_i915_batch_wait_idle( sws );
}
#include <stdlib.h>
#include <xf86drm.h>
-#include "dri_bufpool.h"
-#include "dri_bufmgr.h"
+//#include "dri_bufpool.h"
+//#include "dri_bufmgr.h"
#include "intel_context.h"
#include "intel_winsys.h"
struct intel_pipe_winsys {
struct pipe_winsys winsys;
struct _DriBufferPool *regionPool;
+ struct _DriFreeSlabManager *fMan;
};
driGenBuffers( iws->regionPool,
"pipe buffer", 1, &buffer->driBO, alignment, flags, 0 );
- driBOData( buffer->driBO, size, NULL, 0 );
+ driBOData( buffer->driBO, size, NULL, iws->regionPool, 0 );
return &buffer->base;
}
struct intel_pipe_winsys *iws = intel_pipe_winsys(winsys);
driGenUserBuffer( iws->regionPool,
- "pipe user buffer", &buffer->driBO, ptr, bytes);
+ "pipe user buffer", &buffer->driBO, ptr, bytes );
return &buffer->base;
}
unsigned flags)
{
const unsigned alignment = 64;
- int ret;
+ //int ret;
surf->width = width;
surf->height = height;
return "Intel/DRI/ttm";
}
+static void
+intel_fence_reference( struct pipe_winsys *sws,
+ struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *fence )
+{
+ if (*ptr)
+ driFenceUnReference((struct _DriFenceObject **)ptr);
+
+ if (fence)
+ *ptr = (struct pipe_fence_handle *)driFenceReference((struct _DriFenceObject *)fence);
+}
+
+static int
+intel_fence_signalled( struct pipe_winsys *sws,
+ struct pipe_fence_handle *fence,
+ unsigned flag )
+{
+ return driFenceSignaled((struct _DriFenceObject *)fence, flag);
+}
+
+static int
+intel_fence_finish( struct pipe_winsys *sws,
+ struct pipe_fence_handle *fence,
+ unsigned flag )
+{
+ /* JB: Let's always lazy-wait */
+ return driFenceFinish((struct _DriFenceObject *)fence, flag, 1);
+}
struct pipe_winsys *
-intel_create_pipe_winsys( int fd )
+intel_create_pipe_winsys( int fd, struct _DriFreeSlabManager *fMan )
{
struct intel_pipe_winsys *iws = CALLOC_STRUCT( intel_pipe_winsys );
iws->winsys.surface_alloc_storage = intel_i915_surface_alloc_storage;
iws->winsys.surface_release = intel_i915_surface_release;
+ iws->winsys.fence_reference = intel_fence_reference;
+ iws->winsys.fence_signalled = intel_fence_signalled;
+ iws->winsys.fence_finish = intel_fence_finish;
+
if (fd)
- iws->regionPool = driDRMPoolInit(fd);
+ iws->regionPool = driSlabPoolInit(fd,
+ DRM_BO_FLAG_READ |
+ DRM_BO_FLAG_WRITE |
+ DRM_BO_FLAG_MEM_TT,
+ DRM_BO_FLAG_READ |
+ DRM_BO_FLAG_WRITE |
+ DRM_BO_FLAG_MEM_TT,
+ 64, 6, 16, 4096, 0,
+ fMan);
return &iws->winsys;
}
#define DRM_I830_DESTROY_HEAP 0x0c
#define DRM_I830_SET_VBLANK_PIPE 0x0d
#define DRM_I830_GET_VBLANK_PIPE 0x0e
+#define DRM_I830_MMIO 0x10
typedef struct {
enum {
int last_enqueue; /* last time a buffer was enqueued */
int last_dispatch; /* age of the most recently dispatched buffer */
int ctxOwner; /* last context to upload state */
+ /** Last context that used the buffer manager. */
int texAge;
int pf_enabled; /* is pageflipping allowed? */
int pf_active;
unsigned int rotated_tiled;
unsigned int rotated2_tiled;
- int pipeA_x;
- int pipeA_y;
- int pipeA_w;
- int pipeA_h;
- int pipeB_x;
- int pipeB_y;
- int pipeB_w;
- int pipeB_h;
+ int planeA_x;
+ int planeA_y;
+ int planeA_w;
+ int planeA_h;
+ int planeB_x;
+ int planeB_y;
+ int planeB_w;
+ int planeB_h;
/* Triple buffering */
drm_handle_t third_handle;
int third_offset;
int third_size;
unsigned int third_tiled;
+
+ /* buffer object handles for the static buffers. May change
+ * over the lifetime of the client, though it doesn't in our current
+ * implementation.
+ */
+ unsigned int front_bo_handle;
+ unsigned int back_bo_handle;
+ unsigned int third_bo_handle;
+ unsigned int depth_bo_handle;
} drmI830Sarea;
/* Flags for perf_boxes
int pipe;
} drmI830VBlankPipe;
+#define MMIO_READ 0
+#define MMIO_WRITE 1
+
+#define MMIO_REGS_IA_PRIMATIVES_COUNT 0
+#define MMIO_REGS_IA_VERTICES_COUNT 1
+#define MMIO_REGS_VS_INVOCATION_COUNT 2
+#define MMIO_REGS_GS_PRIMITIVES_COUNT 3
+#define MMIO_REGS_GS_INVOCATION_COUNT 4
+#define MMIO_REGS_CL_PRIMITIVES_COUNT 5
+#define MMIO_REGS_CL_INVOCATION_COUNT 6
+#define MMIO_REGS_PS_INVOCATION_COUNT 7
+#define MMIO_REGS_PS_DEPTH_COUNT 8
+
+typedef struct {
+ unsigned int read_write:1;
+ unsigned int reg:31;
+ void __user *data;
+} drmI830MMIO;
+
#endif /* _I830_DRM_H_ */
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
+ */
+
+#include <xf86drm.h>
+#include <stdlib.h>
+#include "glthread.h"
+#include "errno.h"
+#include "ws_dri_bufmgr.h"
+#include "string.h"
+#include "imports.h"
+#include "ws_dri_bufpool.h"
+#include "ws_dri_fencemgr.h"
+
+/*
+ * This lock is here to protect drmBO structs changing underneath us during a
+ * validate list call, since validatelist cannot take individual locks for
+ * each drmBO. Validatelist takes this lock in write mode. Any access to an
+ * individual drmBO should take this lock in read mode, since in that case, the
+ * driBufferObject mutex will protect the access. Locking order is
+ * driBufferObject mutex -> this rw lock.
+ */
+
+_glthread_DECLARE_STATIC_MUTEX(bmMutex);
+_glthread_DECLARE_STATIC_COND(bmCond);
+
+static int kernelReaders = 0;
+
+static drmBO *drmBOListBuf(void *iterator)
+{
+ drmBONode *node;
+ drmMMListHead *l = (drmMMListHead *) iterator;
+ node = DRMLISTENTRY(drmBONode, l, head);
+ return node->buf;
+}
+
+static void *drmBOListIterator(drmBOList *list)
+{
+ void *ret = list->list.next;
+
+ if (ret == &list->list)
+ return NULL;
+ return ret;
+}
+
+static void *drmBOListNext(drmBOList *list, void *iterator)
+{
+ void *ret;
+
+ drmMMListHead *l = (drmMMListHead *) iterator;
+ ret = l->next;
+ if (ret == &list->list)
+ return NULL;
+ return ret;
+}
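+
+/*
+ * Illustrative sketch: the three helpers above form a simple iterator
+ * protocol over a drmBOList:
+ *
+ *   void *it;
+ *   for (it = drmBOListIterator(list); it; it = drmBOListNext(list, it)) {
+ *       drmBO *bo = drmBOListBuf(it);
+ *       // inspect bo->handle, bo->offset, ...
+ *   }
+ */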
+
+static drmBONode *drmAddListItem(drmBOList *list, drmBO *item,
+ uint64_t arg0,
+ uint64_t arg1)
+{
+ drmBONode *node;
+ drmMMListHead *l;
+
+ l = list->free.next;
+ if (l == &list->free) {
+ node = (drmBONode *) malloc(sizeof(*node));
+ if (!node) {
+ return NULL;
+ }
+ list->numCurrent++;
+ }
+ else {
+ DRMLISTDEL(l);
+ node = DRMLISTENTRY(drmBONode, l, head);
+ }
+ node->buf = item;
+ node->arg0 = arg0;
+ node->arg1 = arg1;
+ DRMLISTADD(&node->head, &list->list);
+ list->numOnList++;
+ return node;
+}
+
+static int drmAddValidateItem(drmBOList *list, drmBO *buf, uint64_t flags,
+ uint64_t mask, int *newItem)
+{
+ drmBONode *node, *cur;
+ drmMMListHead *l;
+
+ *newItem = 0;
+ cur = NULL;
+
+ for (l = list->list.next; l != &list->list; l = l->next) {
+ node = DRMLISTENTRY(drmBONode, l, head);
+ if (node->buf == buf) {
+ cur = node;
+ break;
+ }
+ }
+ if (!cur) {
+ cur = drmAddListItem(list, buf, flags, mask);
+ if (!cur) {
+ return -ENOMEM;
+ }
+ *newItem = 1;
+ cur->arg0 = flags;
+ cur->arg1 = mask;
+ }
+ else {
+ uint64_t memFlags = cur->arg0 & flags & DRM_BO_MASK_MEM;
+ uint64_t accFlags = (cur->arg0 | flags) & ~DRM_BO_MASK_MEM;
+
+ if (mask & cur->arg1 & ~DRM_BO_MASK_MEM & (cur->arg0 ^ flags)) {
+ return -EINVAL;
+ }
+
+ cur->arg1 |= mask;
+ cur->arg0 = (cur->arg0 & ~mask) | ((memFlags | accFlags) & mask);
+
+ if (((cur->arg1 & DRM_BO_MASK_MEM) != 0) &&
+ (cur->arg0 & DRM_BO_MASK_MEM) == 0) {
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
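+
+/*
+ * Illustrative example: adding the same drmBO twice merges the requests;
+ * memory-placement flags are intersected while access flags are unioned
+ * (flag choices invented for illustration):
+ *
+ *   drmAddValidateItem(list, bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
+ *                      DRM_BO_MASK_MEM | DRM_BO_FLAG_READ, &newItem);
+ *   drmAddValidateItem(list, bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
+ *                      DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE, &newItem);
+ *
+ * leaves the node's arg0 equal to
+ * DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE.
+ */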
+
+static void drmBOFreeList(drmBOList *list)
+{
+ drmBONode *node;
+ drmMMListHead *l;
+
+ l = list->list.next;
+ while(l != &list->list) {
+ DRMLISTDEL(l);
+ node = DRMLISTENTRY(drmBONode, l, head);
+ free(node);
+ l = list->list.next;
+ list->numCurrent--;
+ list->numOnList--;
+ }
+
+ l = list->free.next;
+ while(l != &list->free) {
+ DRMLISTDEL(l);
+ node = DRMLISTENTRY(drmBONode, l, head);
+ free(node);
+ l = list->free.next;
+ list->numCurrent--;
+ }
+}
+
+static int drmAdjustListNodes(drmBOList *list)
+{
+ drmBONode *node;
+ drmMMListHead *l;
+ int ret = 0;
+
+ while(list->numCurrent < list->numTarget) {
+ node = (drmBONode *) malloc(sizeof(*node));
+ if (!node) {
+ ret = -ENOMEM;
+ break;
+ }
+ list->numCurrent++;
+ DRMLISTADD(&node->head, &list->free);
+ }
+
+ while(list->numCurrent > list->numTarget) {
+ l = list->free.next;
+ if (l == &list->free)
+ break;
+ DRMLISTDEL(l);
+ node = DRMLISTENTRY(drmBONode, l, head);
+ free(node);
+ list->numCurrent--;
+ }
+ return ret;
+}
+
+static int drmBOCreateList(int numTarget, drmBOList *list)
+{
+ DRMINITLISTHEAD(&list->list);
+ DRMINITLISTHEAD(&list->free);
+ list->numTarget = numTarget;
+ list->numCurrent = 0;
+ list->numOnList = 0;
+ return drmAdjustListNodes(list);
+}
+
+static int drmBOResetList(drmBOList *list)
+{
+ drmMMListHead *l;
+ int ret;
+
+ ret = drmAdjustListNodes(list);
+ if (ret)
+ return ret;
+
+ l = list->list.next;
+ while (l != &list->list) {
+ DRMLISTDEL(l);
+ DRMLISTADD(l, &list->free);
+ list->numOnList--;
+ l = list->list.next;
+ }
+ return drmAdjustListNodes(list);
+}
+
+void driWriteLockKernelBO(void)
+{
+ _glthread_LOCK_MUTEX(bmMutex);
+ while(kernelReaders != 0)
+ _glthread_COND_WAIT(bmCond, bmMutex);
+}
+
+void driWriteUnlockKernelBO(void)
+{
+ _glthread_UNLOCK_MUTEX(bmMutex);
+}
+
+void driReadLockKernelBO(void)
+{
+ _glthread_LOCK_MUTEX(bmMutex);
+ kernelReaders++;
+ _glthread_UNLOCK_MUTEX(bmMutex);
+}
+
+void driReadUnlockKernelBO(void)
+{
+ _glthread_LOCK_MUTEX(bmMutex);
+ if (--kernelReaders == 0)
+ _glthread_COND_BROADCAST(bmCond);
+ _glthread_UNLOCK_MUTEX(bmMutex);
+}
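+
+/*
+ * Illustrative sketch of the reader/writer protocol implemented above
+ * (readers exclude only the validate-list writer, not each other):
+ *
+ *   driReadLockKernelBO();
+ *   // safe to read a drmBO's kernel state (offset, virtual, ...)
+ *   driReadUnlockKernelBO();
+ *
+ *   driWriteLockKernelBO();
+ *   // validate-list processing may rewrite the drmBO structs
+ *   driWriteUnlockKernelBO();
+ */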
+
+
+
+
+/*
+ * TODO: Introduce fence pools in the same way as
+ * buffer object pools.
+ */
+
+typedef struct _DriBufferObject
+{
+ DriBufferPool *pool;
+ _glthread_Mutex mutex;
+ int refCount;
+ const char *name;
+ uint64_t flags;
+ unsigned hint;
+ unsigned alignment;
+ unsigned createdByReference;
+ void *private;
+ /* user-space buffer: */
+ unsigned userBuffer;
+ void *userData;
+ unsigned userSize;
+} DriBufferObject;
+
+typedef struct _DriBufferList {
+ drmBOList drmBuffers; /* List of kernel buffers needing validation */
+ drmBOList driBuffers; /* List of user-space buffers needing validation */
+} DriBufferList;
+
+
+void
+bmError(int val, const char *file, const char *function, int line)
+{
+ _mesa_printf("Fatal video memory manager error \"%s\".\n"
+ "Check kernel logs or set the LIBGL_DEBUG\n"
+ "environment variable to \"verbose\" for more info.\n"
+ "Detected in file %s, line %d, function %s.\n",
+ strerror(-val), file, line, function);
+ abort();
+}
+
+extern drmBO *
+driBOKernel(struct _DriBufferObject *buf)
+{
+ drmBO *ret;
+
+ driReadLockKernelBO();
+ _glthread_LOCK_MUTEX(buf->mutex);
+ assert(buf->private != NULL);
+ ret = buf->pool->kernel(buf->pool, buf->private);
+ if (!ret)
+ BM_CKFATAL(-EINVAL);
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+ driReadUnlockKernelBO();
+
+ return ret;
+}
+
+void
+driBOWaitIdle(struct _DriBufferObject *buf, int lazy)
+{
+
+ /*
+ * This function may block. Is it sane to keep the mutex held during
+ * that time??
+ */
+
+ _glthread_LOCK_MUTEX(buf->mutex);
+ BM_CKFATAL(buf->pool->waitIdle(buf->pool, buf->private, &buf->mutex, lazy));
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+}
+
+void *
+driBOMap(struct _DriBufferObject *buf, unsigned flags, unsigned hint)
+{
+ void *virtual;
+ int retval;
+
+ if (buf->userBuffer) {
+ return buf->userData;
+ }
+
+ _glthread_LOCK_MUTEX(buf->mutex);
+ assert(buf->private != NULL);
+ retval = buf->pool->map(buf->pool, buf->private, flags, hint,
+ &buf->mutex, &virtual);
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+
+ return retval == 0 ? virtual : NULL;
+}
+
+void
+driBOUnmap(struct _DriBufferObject *buf)
+{
+ if (buf->userBuffer)
+ return;
+
+ assert(buf->private != NULL);
+ _glthread_LOCK_MUTEX(buf->mutex);
+ BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private));
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+}
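+
+/*
+ * Illustrative sketch: driBOMap()/driBOUnmap() bracket CPU access
+ * ("data" and "size" invented for illustration):
+ *
+ *   void *virt = driBOMap(buf, DRM_BO_FLAG_WRITE, 0);
+ *   if (virt) {
+ *       memcpy(virt, data, size);
+ *       driBOUnmap(buf);
+ *   }
+ */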
+
+unsigned long
+driBOOffset(struct _DriBufferObject *buf)
+{
+ unsigned long ret;
+
+ assert(buf->private != NULL);
+
+ _glthread_LOCK_MUTEX(buf->mutex);
+ ret = buf->pool->offset(buf->pool, buf->private);
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+ return ret;
+}
+
+unsigned long
+driBOPoolOffset(struct _DriBufferObject *buf)
+{
+ unsigned long ret;
+
+ assert(buf->private != NULL);
+
+ _glthread_LOCK_MUTEX(buf->mutex);
+ ret = buf->pool->poolOffset(buf->pool, buf->private);
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+ return ret;
+}
+
+uint64_t
+driBOFlags(struct _DriBufferObject *buf)
+{
+ uint64_t ret;
+
+ assert(buf->private != NULL);
+
+ driReadLockKernelBO();
+ _glthread_LOCK_MUTEX(buf->mutex);
+ ret = buf->pool->flags(buf->pool, buf->private);
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+ driReadUnlockKernelBO();
+ return ret;
+}
+
+struct _DriBufferObject *
+driBOReference(struct _DriBufferObject *buf)
+{
+ _glthread_LOCK_MUTEX(buf->mutex);
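+   /* A refcount that was already zero here means the buffer was freed. */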
+ if (++buf->refCount == 1) {
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+ BM_CKFATAL(-EINVAL);
+ }
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+ return buf;
+}
+
+void
+driBOUnReference(struct _DriBufferObject *buf)
+{
+ int tmp;
+
+ if (!buf)
+ return;
+
+
+ _glthread_LOCK_MUTEX(buf->mutex);
+ tmp = --buf->refCount;
+ if (!tmp) {
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+ if (buf->private) {
+ if (buf->createdByReference)
+ buf->pool->unreference(buf->pool, buf->private);
+ else
+ buf->pool->destroy(buf->pool, buf->private);
+ }
+ free(buf);
+ } else
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+
+}
+
+
+int
+driBOData(struct _DriBufferObject *buf,
+ unsigned size, const void *data,
+ DriBufferPool *newPool,
+ uint64_t flags)
+{
+ void *virtual = NULL;
+ int newBuffer;
+ int retval = 0;
+ struct _DriBufferPool *pool;
+
+ assert(!buf->userBuffer); /* XXX just do a memcpy? */
+
+ _glthread_LOCK_MUTEX(buf->mutex);
+ pool = buf->pool;
+
+ if (pool == NULL && newPool != NULL) {
+ buf->pool = newPool;
+ pool = newPool;
+ }
+ if (newPool == NULL)
+ newPool = pool;
+
+   if (!pool || !pool->create) {
+ _mesa_error(NULL, GL_INVALID_OPERATION,
+ "driBOData called on invalid buffer\n");
+ BM_CKFATAL(-EINVAL);
+ }
+
+ newBuffer = (!buf->private || pool != newPool ||
+ pool->size(pool, buf->private) < size);
+
+ if (!flags)
+ flags = buf->flags;
+
+ if (newBuffer) {
+
+ if (buf->createdByReference) {
+ _mesa_error(NULL, GL_INVALID_OPERATION,
+ "driBOData requiring resizing called on "
+ "shared buffer.\n");
+ BM_CKFATAL(-EINVAL);
+ }
+
+ if (buf->private)
+ buf->pool->destroy(buf->pool, buf->private);
+
+ pool = newPool;
+ buf->pool = newPool;
+ buf->private = pool->create(pool, size, flags, DRM_BO_HINT_DONT_FENCE,
+ buf->alignment);
+ if (!buf->private)
+ retval = -ENOMEM;
+
+ if (retval == 0)
+ retval = pool->map(pool, buf->private,
+ DRM_BO_FLAG_WRITE,
+ DRM_BO_HINT_DONT_BLOCK, &buf->mutex, &virtual);
+ } else if (pool->map(pool, buf->private, DRM_BO_FLAG_WRITE,
+ DRM_BO_HINT_DONT_BLOCK, &buf->mutex, &virtual)) {
+ /*
+ * Buffer is busy. need to create a new one.
+ */
+
+ void *newBuf;
+
+ newBuf = pool->create(pool, size, flags, DRM_BO_HINT_DONT_FENCE,
+ buf->alignment);
+ if (newBuf) {
+ buf->pool->destroy(buf->pool, buf->private);
+ buf->private = newBuf;
+ }
+
+ retval = pool->map(pool, buf->private,
+ DRM_BO_FLAG_WRITE, 0, &buf->mutex, &virtual);
+ } else {
+ uint64_t flag_diff = flags ^ buf->flags;
+
+ /*
+ * We might need to change buffer flags.
+ */
+
+ if (flag_diff){
+ assert(pool->setStatus != NULL);
+ BM_CKFATAL(pool->unmap(pool, buf->private));
+ BM_CKFATAL(pool->setStatus(pool, buf->private, flag_diff,
+ buf->flags));
+ if (!data)
+ goto out;
+
+ retval = pool->map(pool, buf->private,
+ DRM_BO_FLAG_WRITE, 0, &buf->mutex, &virtual);
+ }
+ }
+
+ if (retval == 0) {
+ if (data)
+ memcpy(virtual, data, size);
+
+ BM_CKFATAL(pool->unmap(pool, buf->private));
+ }
+
+ out:
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+
+ return retval;
+}
+
+void
+driBOSubData(struct _DriBufferObject *buf,
+ unsigned long offset, unsigned long size, const void *data)
+{
+ void *virtual;
+
+ assert(!buf->userBuffer); /* XXX just do a memcpy? */
+
+ _glthread_LOCK_MUTEX(buf->mutex);
+ if (size && data) {
+ BM_CKFATAL(buf->pool->map(buf->pool, buf->private,
+ DRM_BO_FLAG_WRITE, 0, &buf->mutex,
+ &virtual));
+ memcpy((unsigned char *) virtual + offset, data, size);
+ BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private));
+ }
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+}
+
+void
+driBOGetSubData(struct _DriBufferObject *buf,
+ unsigned long offset, unsigned long size, void *data)
+{
+ void *virtual;
+
+ assert(!buf->userBuffer); /* XXX just do a memcpy? */
+
+ _glthread_LOCK_MUTEX(buf->mutex);
+ if (size && data) {
+ BM_CKFATAL(buf->pool->map(buf->pool, buf->private,
+ DRM_BO_FLAG_READ, 0, &buf->mutex, &virtual));
+ memcpy(data, (unsigned char *) virtual + offset, size);
+ BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private));
+ }
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+}
+
+void
+driBOSetReferenced(struct _DriBufferObject *buf,
+ unsigned long handle)
+{
+ _glthread_LOCK_MUTEX(buf->mutex);
+ if (buf->private != NULL) {
+ _mesa_error(NULL, GL_INVALID_OPERATION,
+ "Invalid buffer for setReferenced\n");
+ BM_CKFATAL(-EINVAL);
+
+ }
+ if (buf->pool->reference == NULL) {
+ _mesa_error(NULL, GL_INVALID_OPERATION,
+ "Invalid buffer pool for setReferenced\n");
+ BM_CKFATAL(-EINVAL);
+ }
+ buf->private = buf->pool->reference(buf->pool, handle);
+ if (!buf->private) {
+      _mesa_error(NULL, GL_OUT_OF_MEMORY,
+                  "Failed to reference buffer in setReferenced\n");
+ BM_CKFATAL(-ENOMEM);
+ }
+ buf->createdByReference = GL_TRUE;
+ buf->flags = buf->pool->kernel(buf->pool, buf->private)->flags;
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+}
+
+int
+driGenBuffers(struct _DriBufferPool *pool,
+ const char *name,
+ unsigned n,
+ struct _DriBufferObject *buffers[],
+ unsigned alignment, uint64_t flags, unsigned hint)
+{
+ struct _DriBufferObject *buf;
+ int i;
+
+ flags = (flags) ? flags : DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM |
+ DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE;
+
+
+ assert(pool);
+
+ for (i = 0; i < n; ++i) {
+ buf = (struct _DriBufferObject *) calloc(1, sizeof(*buf));
+ if (!buf)
+ return -ENOMEM;
+
+ _glthread_INIT_MUTEX(buf->mutex);
+ _glthread_LOCK_MUTEX(buf->mutex);
+ buf->refCount = 1;
+ buf->flags = flags;
+ buf->hint = hint;
+ buf->name = name;
+ buf->alignment = alignment;
+ buf->pool = pool;
+ buf->createdByReference = 0;
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+ buffers[i] = buf;
+ }
+ return 0;
+}
+
+void
+driGenUserBuffer(struct _DriBufferPool *pool,
+ const char *name,
+ struct _DriBufferObject **buffers,
+ void *ptr, unsigned bytes)
+{
+ const unsigned alignment = 1, flags = 0, hint = 0;
+
+ driGenBuffers(pool, name, 1, buffers, alignment, flags, hint);
+
+ (*buffers)->userBuffer = 1;
+ (*buffers)->userData = ptr;
+ (*buffers)->userSize = bytes;
+}
+
+void
+driDeleteBuffers(unsigned n, struct _DriBufferObject *buffers[])
+{
+ int i;
+
+ for (i = 0; i < n; ++i) {
+ driBOUnReference(buffers[i]);
+ }
+}
+
+
+void
+driInitBufMgr(int fd)
+{
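+   /* Intentionally a no-op in this implementation. */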
+ ;
+}
+
+/*
+ * Note that lists are per-context and don't need mutex protection.
+ */
+
+struct _DriBufferList *
+driBOCreateList(int target)
+{
+   struct _DriBufferList *list = calloc(1, sizeof(*list));
+
+ BM_CKFATAL(drmBOCreateList(target, &list->drmBuffers));
+ BM_CKFATAL(drmBOCreateList(target, &list->driBuffers));
+ return list;
+}
+
+int
+driBOResetList(struct _DriBufferList * list)
+{
+ int ret;
+ ret = drmBOResetList(&list->drmBuffers);
+ if (ret)
+ return ret;
+ ret = drmBOResetList(&list->driBuffers);
+ return ret;
+}
+
+void
+driBOFreeList(struct _DriBufferList * list)
+{
+ drmBOFreeList(&list->drmBuffers);
+ drmBOFreeList(&list->driBuffers);
+ free(list);
+}
+
+
+/*
+ * Copied from libdrm, because it is needed by driAddValidateItem.
+ */
+
+static drmBONode *
+driAddListItem(drmBOList * list, drmBO * item,
+ uint64_t arg0, uint64_t arg1)
+{
+ drmBONode *node;
+ drmMMListHead *l;
+
+ l = list->free.next;
+ if (l == &list->free) {
+ node = (drmBONode *) malloc(sizeof(*node));
+ if (!node) {
+ return NULL;
+ }
+ list->numCurrent++;
+ } else {
+ DRMLISTDEL(l);
+ node = DRMLISTENTRY(drmBONode, l, head);
+ }
+ memset(&node->bo_arg, 0, sizeof(node->bo_arg));
+ node->buf = item;
+ node->arg0 = arg0;
+ node->arg1 = arg1;
+ DRMLISTADDTAIL(&node->head, &list->list);
+ list->numOnList++;
+ return node;
+}
+
+/*
+ * Slightly modified version compared to the libdrm version.
+ * This one returns the list index of the buffer put on the list.
+ */
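+/*
+ * For example (illustrative): adding buffers A, B and then A again
+ * yields the itemLoc values 0, 1 and 0, since the second add of A
+ * finds the existing node at index 0 instead of appending a new one.
+ */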
+
+static int
+driAddValidateItem(drmBOList * list, drmBO * buf, uint64_t flags,
+ uint64_t mask, int *itemLoc,
+ struct _drmBONode **pnode)
+{
+ drmBONode *node, *cur;
+ drmMMListHead *l;
+ int count = 0;
+
+ cur = NULL;
+
+ for (l = list->list.next; l != &list->list; l = l->next) {
+ node = DRMLISTENTRY(drmBONode, l, head);
+ if (node->buf == buf) {
+ cur = node;
+ break;
+ }
+ count++;
+ }
+ if (!cur) {
+ cur = driAddListItem(list, buf, flags, mask);
+ if (!cur)
+ return -ENOMEM;
+
+ cur->arg0 = flags;
+ cur->arg1 = mask;
+ } else {
+ uint64_t memFlags = cur->arg0 & flags & DRM_BO_MASK_MEM;
+ uint64_t accFlags = (cur->arg0 | flags) & ~DRM_BO_MASK_MEM;
+
+ if (mask & cur->arg1 & ~DRM_BO_MASK_MEM & (cur->arg0 ^ flags)) {
+ return -EINVAL;
+ }
+
+ cur->arg1 |= mask;
+ cur->arg0 = (cur->arg0 & ~mask) | ((memFlags | accFlags) & mask);
+
+ if (((cur->arg1 & DRM_BO_MASK_MEM) != 0) &&
+ (cur->arg0 & DRM_BO_MASK_MEM) == 0) {
+ return -EINVAL;
+ }
+ }
+ *itemLoc = count;
+ *pnode = cur;
+ return 0;
+}
+
+
+void
+driBOAddListItem(struct _DriBufferList * list, struct _DriBufferObject *buf,
+ uint64_t flags, uint64_t mask, int *itemLoc,
+ struct _drmBONode **node)
+{
+ int newItem;
+
+ _glthread_LOCK_MUTEX(buf->mutex);
+ BM_CKFATAL(driAddValidateItem(&list->drmBuffers,
+ buf->pool->kernel(buf->pool, buf->private),
+ flags, mask, itemLoc, node));
+ BM_CKFATAL(drmAddValidateItem(&list->driBuffers, (drmBO *) buf,
+ flags, mask, &newItem));
+ if (newItem)
+ buf->refCount++;
+
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+}
+
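+/*
+ * driGetdrmBOList takes the kernel buffer-object write lock; the caller
+ * must release it again with driPutdrmBOList.
+ */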
+drmBOList *driGetdrmBOList(struct _DriBufferList *list)
+{
+ driWriteLockKernelBO();
+ return &list->drmBuffers;
+}
+
+void driPutdrmBOList(struct _DriBufferList *list)
+{
+ driWriteUnlockKernelBO();
+}
+
+
+void
+driBOFence(struct _DriBufferObject *buf, struct _DriFenceObject *fence)
+{
+ _glthread_LOCK_MUTEX(buf->mutex);
+ if (buf->pool->fence)
+ BM_CKFATAL(buf->pool->fence(buf->pool, buf->private, fence));
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+
+}
+
+void
+driBOUnrefUserList(struct _DriBufferList *list)
+{
+ struct _DriBufferObject *buf;
+ void *curBuf;
+
+ curBuf = drmBOListIterator(&list->driBuffers);
+ while (curBuf) {
+ buf = (struct _DriBufferObject *)drmBOListBuf(curBuf);
+ driBOUnReference(buf);
+ curBuf = drmBOListNext(&list->driBuffers, curBuf);
+ }
+}
+
+struct _DriFenceObject *
+driBOFenceUserList(struct _DriFenceMgr *mgr,
+ struct _DriBufferList *list, const char *name,
+ drmFence *kFence)
+{
+ struct _DriFenceObject *fence;
+ struct _DriBufferObject *buf;
+ void *curBuf;
+
+ fence = driFenceCreate(mgr, kFence->fence_class, kFence->type,
+ kFence, sizeof(*kFence));
+ curBuf = drmBOListIterator(&list->driBuffers);
+
+ /*
+ * User-space fencing callbacks.
+ */
+
+ while (curBuf) {
+ buf = (struct _DriBufferObject *) drmBOListBuf(curBuf);
+ driBOFence(buf, fence);
+ driBOUnReference(buf);
+ curBuf = drmBOListNext(&list->driBuffers, curBuf);
+ }
+
+ driBOResetList(list);
+ return fence;
+}
+
+void
+driBOValidateUserList(struct _DriBufferList * list)
+{
+ void *curBuf;
+ struct _DriBufferObject *buf;
+
+ curBuf = drmBOListIterator(&list->driBuffers);
+
+ /*
+ * User-space validation callbacks.
+ */
+
+ while (curBuf) {
+ buf = (struct _DriBufferObject *) drmBOListBuf(curBuf);
+ _glthread_LOCK_MUTEX(buf->mutex);
+ if (buf->pool->validate)
+ BM_CKFATAL(buf->pool->validate(buf->pool, buf->private, &buf->mutex));
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+ curBuf = drmBOListNext(&list->driBuffers, curBuf);
+ }
+}
+
+
+void
+driPoolTakeDown(struct _DriBufferPool *pool)
+{
+ pool->takeDown(pool);
+
+}
+
+unsigned long
+driBOSize(struct _DriBufferObject *buf)
+{
+ unsigned long size;
+
+ _glthread_LOCK_MUTEX(buf->mutex);
+ size = buf->pool->size(buf->pool, buf->private);
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+
+ return size;
+
+}
+
+drmBOList *driBOGetDRMBuffers(struct _DriBufferList *list)
+{
+ return &list->drmBuffers;
+}
+
+drmBOList *driBOGetDRIBuffers(struct _DriBufferList *list)
+{
+ return &list->driBuffers;
+}
+
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
+ */
+
+#ifndef _PSB_BUFMGR_H_
+#define _PSB_BUFMGR_H_
+#include <xf86mm.h>
+#include "i915_drm.h"
+#include "ws_dri_fencemgr.h"
+
+typedef struct _drmBONode
+{
+ drmMMListHead head;
+ drmBO *buf;
+ struct drm_i915_op_arg bo_arg;
+ uint64_t arg0;
+ uint64_t arg1;
+} drmBONode;
+
+typedef struct _drmBOList {
+ unsigned numTarget;
+ unsigned numCurrent;
+ unsigned numOnList;
+ drmMMListHead list;
+ drmMMListHead free;
+} drmBOList;
+
+
+struct _DriFenceObject;
+struct _DriBufferObject;
+struct _DriBufferPool;
+struct _DriBufferList;
+
+/*
+ * Return a pointer to the libdrm buffer object this DriBufferObject
+ * uses.
+ */
+
+extern drmBO *driBOKernel(struct _DriBufferObject *buf);
+extern void *driBOMap(struct _DriBufferObject *buf, unsigned flags,
+ unsigned hint);
+extern void driBOUnmap(struct _DriBufferObject *buf);
+extern unsigned long driBOOffset(struct _DriBufferObject *buf);
+extern unsigned long driBOPoolOffset(struct _DriBufferObject *buf);
+
+extern uint64_t driBOFlags(struct _DriBufferObject *buf);
+extern struct _DriBufferObject *driBOReference(struct _DriBufferObject *buf);
+extern void driBOUnReference(struct _DriBufferObject *buf);
+
+extern int driBOData(struct _DriBufferObject *r_buf,
+ unsigned size, const void *data,
+ struct _DriBufferPool *pool, uint64_t flags);
+
+extern void driBOSubData(struct _DriBufferObject *buf,
+ unsigned long offset, unsigned long size,
+ const void *data);
+extern void driBOGetSubData(struct _DriBufferObject *buf,
+ unsigned long offset, unsigned long size,
+ void *data);
+extern int driGenBuffers(struct _DriBufferPool *pool,
+ const char *name,
+ unsigned n,
+ struct _DriBufferObject *buffers[],
+ unsigned alignment, uint64_t flags, unsigned hint);
+extern void driGenUserBuffer(struct _DriBufferPool *pool,
+ const char *name,
+ struct _DriBufferObject *buffers[],
+ void *ptr, unsigned bytes);
+extern void driDeleteBuffers(unsigned n, struct _DriBufferObject *buffers[]);
+extern void driInitBufMgr(int fd);
+extern struct _DriBufferList *driBOCreateList(int target);
+extern int driBOResetList(struct _DriBufferList * list);
+extern void driBOAddListItem(struct _DriBufferList * list,
+ struct _DriBufferObject *buf,
+ uint64_t flags, uint64_t mask, int *itemLoc,
+ struct _drmBONode **node);
+
+extern void driBOValidateList(int fd, struct _DriBufferList * list);
+extern void driBOFreeList(struct _DriBufferList * list);
+extern struct _DriFenceObject *driBOFenceUserList(struct _DriFenceMgr *mgr,
+ struct _DriBufferList *list,
+ const char *name,
+ drmFence *kFence);
+extern void driBOUnrefUserList(struct _DriBufferList *list);
+extern void driBOValidateUserList(struct _DriBufferList * list);
+extern drmBOList *driGetdrmBOList(struct _DriBufferList *list);
+extern void driPutdrmBOList(struct _DriBufferList *list);
+
+extern void driBOFence(struct _DriBufferObject *buf,
+ struct _DriFenceObject *fence);
+
+extern void driPoolTakeDown(struct _DriBufferPool *pool);
+extern void driBOSetReferenced(struct _DriBufferObject *buf,
+ unsigned long handle);
+extern unsigned long driBOSize(struct _DriBufferObject *buf);
+extern void driBOWaitIdle(struct _DriBufferObject *buf, int lazy);
+
+extern void driReadLockKernelBO(void);
+extern void driReadUnlockKernelBO(void);
+extern void driWriteLockKernelBO(void);
+extern void driWriteUnlockKernelBO(void);
+
+/*
+ * For debugging purposes.
+ */
+
+extern drmBOList *driBOGetDRMBuffers(struct _DriBufferList *list);
+extern drmBOList *driBOGetDRIBuffers(struct _DriBufferList *list);
+#endif
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef _PSB_BUFPOOL_H_
+#define _PSB_BUFPOOL_H_
+
+#include <xf86drm.h>
+#include <glthread.h>
+struct _DriFenceObject;
+
+typedef struct _DriBufferPool
+{
+ int fd;
+ int (*map) (struct _DriBufferPool * pool, void *private,
+ unsigned flags, int hint, _glthread_Mutex *mutex,
+ void **virtual);
+ int (*unmap) (struct _DriBufferPool * pool, void *private);
+ int (*destroy) (struct _DriBufferPool * pool, void *private);
+ unsigned long (*offset) (struct _DriBufferPool * pool, void *private);
+ unsigned long (*poolOffset) (struct _DriBufferPool * pool, void *private);
+ uint64_t (*flags) (struct _DriBufferPool * pool, void *private);
+ unsigned long (*size) (struct _DriBufferPool * pool, void *private);
+ void *(*create) (struct _DriBufferPool * pool, unsigned long size,
+ uint64_t flags, unsigned hint, unsigned alignment);
+ void *(*reference) (struct _DriBufferPool * pool, unsigned handle);
+ int (*unreference) (struct _DriBufferPool * pool, void *private);
+ int (*fence) (struct _DriBufferPool * pool, void *private,
+ struct _DriFenceObject * fence);
+ drmBO *(*kernel) (struct _DriBufferPool * pool, void *private);
+ int (*validate) (struct _DriBufferPool * pool, void *private, _glthread_Mutex *mutex);
+ int (*waitIdle) (struct _DriBufferPool *pool, void *private, _glthread_Mutex *mutex,
+ int lazy);
+ int (*setStatus) (struct _DriBufferPool *pool, void *private,
+ uint64_t flag_diff, uint64_t old_flags);
+ void (*takeDown) (struct _DriBufferPool * pool);
+ void *data;
+} DriBufferPool;
+
+extern void bmError(int val, const char *file, const char *function,
+ int line);
+#define BM_CKFATAL(val) \
+ do{ \
+ int tstVal = (val); \
+ if (tstVal) \
+ bmError(tstVal, __FILE__, __FUNCTION__, __LINE__); \
+  } while(0)
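+
+/*
+ * Example (illustrative): wrap calls that return negative errno codes,
+ * e.g.
+ *
+ *   BM_CKFATAL(pool->unmap(pool, buf->private));
+ */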
+
+
+/*
+ * Builtin pools.
+ */
+
+/*
+ * Kernel buffer objects. Size in multiples of page size. Page size aligned.
+ */
+
+extern struct _DriBufferPool *driDRMPoolInit(int fd);
+extern struct _DriBufferPool *driMallocPoolInit(void);
+
+struct _DriFreeSlabManager;
+extern struct _DriBufferPool * driSlabPoolInit(int fd, uint64_t flags,
+ uint64_t validMask,
+ uint32_t smallestSize,
+ uint32_t numSizes,
+ uint32_t desiredNumBuffers,
+ uint32_t maxSlabSize,
+ uint32_t pageAlignment,
+ struct _DriFreeSlabManager *fMan);
+extern void driFinishFreeSlabManager(struct _DriFreeSlabManager *fMan);
+extern struct _DriFreeSlabManager *
+driInitFreeSlabManager(uint32_t checkIntervalMsec, uint32_t slabTimeoutMsec);
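+
+/*
+ * Illustrative sketch (not part of this interface): typical pool setup
+ * and buffer use. The fd, flag and size values are assumptions made for
+ * the example only.
+ *
+ *   struct _DriBufferPool *pool = driDRMPoolInit(fd);
+ *   struct _DriBufferObject *buf;
+ *   void *map;
+ *
+ *   driGenBuffers(pool, "example", 1, &buf, 0,
+ *                 DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ |
+ *                 DRM_BO_FLAG_WRITE, 0);
+ *   driBOData(buf, 4096, NULL, NULL, 0);
+ *   map = driBOMap(buf, DRM_BO_FLAG_WRITE, 0);
+ *   ...fill the buffer through map...
+ *   driBOUnmap(buf);
+ *   driDeleteBuffers(1, &buf);
+ */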
+
+
+#endif
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include <xf86drm.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include "ws_dri_bufpool.h"
+#include "ws_dri_bufmgr.h"
+#include "assert.h"
+
+/*
+ * Buffer pool implementation using DRM buffer objects as DRI buffer objects.
+ */
+
+static void *
+pool_create(struct _DriBufferPool *pool,
+ unsigned long size, uint64_t flags, unsigned hint,
+ unsigned alignment)
+{
+ drmBO *buf = (drmBO *) malloc(sizeof(*buf));
+ int ret;
+ unsigned pageSize = getpagesize();
+
+ if (!buf)
+ return NULL;
+
+ if ((alignment > pageSize) && (alignment % pageSize)) {
+ free(buf);
+ return NULL;
+ }
+
+ ret = drmBOCreate(pool->fd, size, alignment / pageSize,
+ NULL,
+ flags, hint, buf);
+ if (ret) {
+ free(buf);
+ return NULL;
+ }
+
+ return (void *) buf;
+}
+
+static void *
+pool_reference(struct _DriBufferPool *pool, unsigned handle)
+{
+ drmBO *buf = (drmBO *) malloc(sizeof(*buf));
+ int ret;
+
+ if (!buf)
+ return NULL;
+
+ ret = drmBOReference(pool->fd, handle, buf);
+
+ if (ret) {
+ free(buf);
+ return NULL;
+ }
+
+ return (void *) buf;
+}
+
+static int
+pool_destroy(struct _DriBufferPool *pool, void *private)
+{
+ int ret;
+ drmBO *buf = (drmBO *) private;
+ driReadLockKernelBO();
+ ret = drmBOUnreference(pool->fd, buf);
+ free(buf);
+ driReadUnlockKernelBO();
+ return ret;
+}
+
+static int
+pool_unreference(struct _DriBufferPool *pool, void *private)
+{
+ int ret;
+ drmBO *buf = (drmBO *) private;
+ driReadLockKernelBO();
+ ret = drmBOUnreference(pool->fd, buf);
+ free(buf);
+ driReadUnlockKernelBO();
+ return ret;
+}
+
+static int
+pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
+ int hint, _glthread_Mutex *mutex, void **virtual)
+{
+ drmBO *buf = (drmBO *) private;
+ int ret;
+
+ driReadLockKernelBO();
+ ret = drmBOMap(pool->fd, buf, flags, hint, virtual);
+ driReadUnlockKernelBO();
+ return ret;
+}
+
+static int
+pool_unmap(struct _DriBufferPool *pool, void *private)
+{
+ drmBO *buf = (drmBO *) private;
+ int ret;
+
+ driReadLockKernelBO();
+ ret = drmBOUnmap(pool->fd, buf);
+ driReadUnlockKernelBO();
+
+ return ret;
+}
+
+static unsigned long
+pool_offset(struct _DriBufferPool *pool, void *private)
+{
+ drmBO *buf = (drmBO *) private;
+ unsigned long offset;
+
+ driReadLockKernelBO();
+ assert(buf->flags & DRM_BO_FLAG_NO_MOVE);
+ offset = buf->offset;
+ driReadUnlockKernelBO();
+
+   return offset;
+}
+
+static unsigned long
+pool_poolOffset(struct _DriBufferPool *pool, void *private)
+{
+ return 0;
+}
+
+static uint64_t
+pool_flags(struct _DriBufferPool *pool, void *private)
+{
+ drmBO *buf = (drmBO *) private;
+ uint64_t flags;
+
+ driReadLockKernelBO();
+ flags = buf->flags;
+ driReadUnlockKernelBO();
+
+ return flags;
+}
+
+
+static unsigned long
+pool_size(struct _DriBufferPool *pool, void *private)
+{
+ drmBO *buf = (drmBO *) private;
+ unsigned long size;
+
+ driReadLockKernelBO();
+ size = buf->size;
+ driReadUnlockKernelBO();
+
+   return size;
+}
+
+static int
+pool_fence(struct _DriBufferPool *pool, void *private,
+ struct _DriFenceObject *fence)
+{
+ /*
+ * Noop. The kernel handles all fencing.
+ */
+
+ return 0;
+}
+
+static drmBO *
+pool_kernel(struct _DriBufferPool *pool, void *private)
+{
+ return (drmBO *) private;
+}
+
+static int
+pool_waitIdle(struct _DriBufferPool *pool, void *private, _glthread_Mutex *mutex,
+ int lazy)
+{
+ drmBO *buf = (drmBO *) private;
+ int ret;
+
+ driReadLockKernelBO();
+ ret = drmBOWaitIdle(pool->fd, buf, (lazy) ? DRM_BO_HINT_WAIT_LAZY:0);
+ driReadUnlockKernelBO();
+
+ return ret;
+}
+
+
+static void
+pool_takedown(struct _DriBufferPool *pool)
+{
+ free(pool);
+}
+
+/*static int
+pool_setStatus(struct _DriBufferPool *pool, void *private,
+ uint64_t flag_diff, uint64_t old_flags)
+{
+ drmBO *buf = (drmBO *) private;
+ uint64_t new_flags = old_flags ^ flag_diff;
+ int ret;
+
+ driReadLockKernelBO();
+ ret = drmBOSetStatus(pool->fd, buf, new_flags, flag_diff,
+ 0, 0, 0);
+ driReadUnlockKernelBO();
+ return ret;
+}*/
+
+struct _DriBufferPool *
+driDRMPoolInit(int fd)
+{
+ struct _DriBufferPool *pool;
+
+ pool = (struct _DriBufferPool *) malloc(sizeof(*pool));
+
+ if (!pool)
+ return NULL;
+
+ pool->fd = fd;
+ pool->map = &pool_map;
+ pool->unmap = &pool_unmap;
+ pool->destroy = &pool_destroy;
+ pool->offset = &pool_offset;
+ pool->poolOffset = &pool_poolOffset;
+ pool->flags = &pool_flags;
+ pool->size = &pool_size;
+ pool->create = &pool_create;
+ pool->fence = &pool_fence;
+ pool->kernel = &pool_kernel;
+ pool->validate = NULL;
+ pool->waitIdle = &pool_waitIdle;
+ pool->takeDown = &pool_takedown;
+ pool->reference = &pool_reference;
+   pool->unreference = &pool_unreference;
+   pool->setStatus = NULL;
+   pool->data = NULL;
+ return pool;
+}
--- /dev/null
+#include "ws_dri_fencemgr.h"
+#include "glthread.h"
+#include <xf86mm.h>
+#include <string.h>
+#include <unistd.h>
+
+/*
+ * Note: Locking order is
+ * _DriFenceObject::mutex
+ * _DriFenceMgr::mutex
+ */
+
+struct _DriFenceMgr {
+ /*
+ * Constant members. Need no mutex protection.
+ */
+ struct _DriFenceMgrCreateInfo info;
+ void *private;
+
+ /*
+ * These members are protected by this->mutex
+ */
+ _glthread_Mutex mutex;
+ int refCount;
+ drmMMListHead *heads;
+};
+
+struct _DriFenceObject {
+
+ /*
+ * These members are constant and need no mutex protection.
+ */
+ struct _DriFenceMgr *mgr;
+ uint32_t fence_class;
+ uint32_t fence_type;
+
+ /*
+ * These members are protected by mgr->mutex.
+ */
+ drmMMListHead head;
+ int refCount;
+
+ /*
+ * These members are protected by this->mutex.
+ */
+ _glthread_Mutex mutex;
+ uint32_t signaled_type;
+ void *private;
+};
+
+uint32_t
+driFenceType(struct _DriFenceObject *fence)
+{
+ return fence->fence_type;
+}
+
+struct _DriFenceMgr *
+driFenceMgrCreate(const struct _DriFenceMgrCreateInfo *info)
+{
+ struct _DriFenceMgr *tmp;
+ uint32_t i;
+
+ tmp = calloc(1, sizeof(*tmp));
+ if (!tmp)
+ return NULL;
+
+ _glthread_INIT_MUTEX(tmp->mutex);
+ _glthread_LOCK_MUTEX(tmp->mutex);
+ tmp->refCount = 1;
+ tmp->info = *info;
+ tmp->heads = calloc(tmp->info.num_classes, sizeof(*tmp->heads));
+ if (!tmp->heads)
+ goto out_err;
+
+ for (i=0; i<tmp->info.num_classes; ++i) {
+ DRMINITLISTHEAD(&tmp->heads[i]);
+ }
+ _glthread_UNLOCK_MUTEX(tmp->mutex);
+ return tmp;
+
+ out_err:
+   _glthread_UNLOCK_MUTEX(tmp->mutex);
+   free(tmp);
+   return NULL;
+}
+
+static void
+driFenceMgrUnrefUnlock(struct _DriFenceMgr **pMgr)
+{
+ struct _DriFenceMgr *mgr = *pMgr;
+
+ *pMgr = NULL;
+ if (--mgr->refCount == 0)
+ free(mgr);
+ else
+ _glthread_UNLOCK_MUTEX(mgr->mutex);
+}
+
+void
+driFenceMgrUnReference(struct _DriFenceMgr **pMgr)
+{
+ _glthread_LOCK_MUTEX((*pMgr)->mutex);
+ driFenceMgrUnrefUnlock(pMgr);
+}
+
+static void
+driFenceUnReferenceLocked(struct _DriFenceObject **pFence)
+{
+ struct _DriFenceObject *fence = *pFence;
+ struct _DriFenceMgr *mgr = fence->mgr;
+
+ *pFence = NULL;
+ if (--fence->refCount == 0) {
+ DRMLISTDELINIT(&fence->head);
+ if (fence->private)
+ mgr->info.unreference(mgr, &fence->private);
+ fence->mgr = NULL;
+ --mgr->refCount;
+ free(fence);
+ }
+}
+
+
+static void
+driSignalPreviousFencesLocked(struct _DriFenceMgr *mgr,
+ drmMMListHead *list,
+ uint32_t fence_class,
+ uint32_t fence_type)
+{
+ struct _DriFenceObject *entry;
+ drmMMListHead *prev;
+
+ while(list != &mgr->heads[fence_class]) {
+ entry = DRMLISTENTRY(struct _DriFenceObject, list, head);
+
+ /*
+ * Up refcount so that entry doesn't disappear from under us
+ * when we unlock-relock mgr to get the correct locking order.
+ */
+
+ ++entry->refCount;
+ _glthread_UNLOCK_MUTEX(mgr->mutex);
+ _glthread_LOCK_MUTEX(entry->mutex);
+ _glthread_LOCK_MUTEX(mgr->mutex);
+
+ prev = list->prev;
+
+ if (list->prev == list) {
+
+ /*
+ * Somebody else removed the entry from the list.
+ */
+
+ _glthread_UNLOCK_MUTEX(entry->mutex);
+ driFenceUnReferenceLocked(&entry);
+ return;
+ }
+
+ entry->signaled_type |= (fence_type & entry->fence_type);
+ if (entry->signaled_type == entry->fence_type) {
+ DRMLISTDELINIT(list);
+ mgr->info.unreference(mgr, &entry->private);
+ }
+ _glthread_UNLOCK_MUTEX(entry->mutex);
+ driFenceUnReferenceLocked(&entry);
+ list = prev;
+ }
+}
+
+
+int
+driFenceFinish(struct _DriFenceObject *fence, uint32_t fence_type,
+ int lazy_hint)
+{
+ struct _DriFenceMgr *mgr = fence->mgr;
+ int ret = 0;
+
+ _glthread_LOCK_MUTEX(fence->mutex);
+
+ if ((fence->signaled_type & fence_type) == fence_type)
+ goto out0;
+
+ ret = mgr->info.finish(mgr, fence->private, fence_type, lazy_hint);
+ if (ret)
+ goto out0;
+
+ _glthread_LOCK_MUTEX(mgr->mutex);
+ _glthread_UNLOCK_MUTEX(fence->mutex);
+
+ driSignalPreviousFencesLocked(mgr, &fence->head, fence->fence_class,
+ fence_type);
+ _glthread_UNLOCK_MUTEX(mgr->mutex);
+ return 0;
+
+ out0:
+ _glthread_UNLOCK_MUTEX(fence->mutex);
+ return ret;
+}
+
+uint32_t driFenceSignaledTypeCached(struct _DriFenceObject *fence)
+{
+ uint32_t ret;
+
+ _glthread_LOCK_MUTEX(fence->mutex);
+ ret = fence->signaled_type;
+ _glthread_UNLOCK_MUTEX(fence->mutex);
+
+ return ret;
+}
+
+int
+driFenceSignaledType(struct _DriFenceObject *fence, uint32_t flush_type,
+ uint32_t *signaled)
+{
+ int ret = 0;
+ struct _DriFenceMgr *mgr;
+
+ _glthread_LOCK_MUTEX(fence->mutex);
+ mgr = fence->mgr;
+ *signaled = fence->signaled_type;
+ if ((fence->signaled_type & flush_type) == flush_type)
+ goto out0;
+
+ ret = mgr->info.signaled(mgr, fence->private, flush_type, signaled);
+ if (ret) {
+ *signaled = fence->signaled_type;
+ goto out0;
+ }
+
+ if ((fence->signaled_type | *signaled) == fence->signaled_type)
+ goto out0;
+
+ _glthread_LOCK_MUTEX(mgr->mutex);
+ _glthread_UNLOCK_MUTEX(fence->mutex);
+
+ driSignalPreviousFencesLocked(mgr, &fence->head, fence->fence_class,
+ *signaled);
+
+ _glthread_UNLOCK_MUTEX(mgr->mutex);
+ return 0;
+ out0:
+ _glthread_UNLOCK_MUTEX(fence->mutex);
+ return ret;
+}
+
+struct _DriFenceObject *
+driFenceReference(struct _DriFenceObject *fence)
+{
+ _glthread_LOCK_MUTEX(fence->mgr->mutex);
+ ++fence->refCount;
+ _glthread_UNLOCK_MUTEX(fence->mgr->mutex);
+ return fence;
+}
+
+void
+driFenceUnReference(struct _DriFenceObject **pFence)
+{
+ struct _DriFenceMgr *mgr;
+
+ if (*pFence == NULL)
+ return;
+
+ mgr = (*pFence)->mgr;
+ _glthread_LOCK_MUTEX(mgr->mutex);
+ ++mgr->refCount;
+ driFenceUnReferenceLocked(pFence);
+ driFenceMgrUnrefUnlock(&mgr);
+}
+
+struct _DriFenceObject
+*driFenceCreate(struct _DriFenceMgr *mgr, uint32_t fence_class,
+ uint32_t fence_type, void *private, size_t private_size)
+{
+ struct _DriFenceObject *fence;
+ size_t fence_size = sizeof(*fence);
+
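+   /*
+    * Round up so that private data copied in after the fence struct
+    * stays 16-byte aligned.
+    */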
+ if (private_size)
+ fence_size = ((fence_size + 15) & ~15);
+
+ fence = calloc(1, fence_size + private_size);
+
+ if (!fence) {
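+      /*
+       * Out of memory: block on the fence instead, so that the caller
+       * can safely discard it.
+       */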
+ int ret = mgr->info.finish(mgr, private, fence_type, 0);
+
+ if (ret)
+ usleep(10000000);
+
+ return NULL;
+ }
+
+ _glthread_INIT_MUTEX(fence->mutex);
+ _glthread_LOCK_MUTEX(fence->mutex);
+ _glthread_LOCK_MUTEX(mgr->mutex);
+ fence->refCount = 1;
+ DRMLISTADDTAIL(&fence->head, &mgr->heads[fence_class]);
+ fence->mgr = mgr;
+ ++mgr->refCount;
+ _glthread_UNLOCK_MUTEX(mgr->mutex);
+ fence->fence_class = fence_class;
+ fence->fence_type = fence_type;
+ fence->signaled_type = 0;
+ fence->private = private;
+ if (private_size) {
+ fence->private = (void *)(((uint8_t *) fence) + fence_size);
+ memcpy(fence->private, private, private_size);
+ }
+
+ _glthread_UNLOCK_MUTEX(fence->mutex);
+ return fence;
+}
+
+
+static int
+tSignaled(struct _DriFenceMgr *mgr, void *private, uint32_t flush_type,
+ uint32_t *signaled_type)
+{
+ long fd = (long) mgr->private;
+ int dummy;
+ drmFence *fence = (drmFence *) private;
+ int ret;
+
+ *signaled_type = 0;
+ ret = drmFenceSignaled((int) fd, fence, flush_type, &dummy);
+ if (ret)
+ return ret;
+
+ *signaled_type = fence->signaled;
+
+ return 0;
+}
+
+static int
+tFinish(struct _DriFenceMgr *mgr, void *private, uint32_t fence_type,
+ int lazy_hint)
+{
+ long fd = (long) mgr->private;
+ unsigned flags = lazy_hint ? DRM_FENCE_FLAG_WAIT_LAZY : 0;
+
+ return drmFenceWait((int)fd, flags, (drmFence *) private, fence_type);
+}
+
+static int
+tUnref(struct _DriFenceMgr *mgr, void **private)
+{
+ long fd = (long) mgr->private;
+ drmFence *fence = (drmFence *) *private;
+ *private = NULL;
+
+   return drmFenceUnreference((int) fd, fence);
+}
+
+struct _DriFenceMgr *driFenceMgrTTMInit(int fd)
+{
+ struct _DriFenceMgrCreateInfo info;
+ struct _DriFenceMgr *mgr;
+
+ info.flags = DRI_FENCE_CLASS_ORDERED;
+ info.num_classes = 4;
+ info.signaled = tSignaled;
+ info.finish = tFinish;
+ info.unreference = tUnref;
+
+ mgr = driFenceMgrCreate(&info);
+ if (mgr == NULL)
+ return NULL;
+
+ mgr->private = (void *) (long) fd;
+ return mgr;
+}
+
--- /dev/null
+#ifndef DRI_FENCEMGR_H
+#define DRI_FENCEMGR_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+struct _DriFenceObject;
+struct _DriFenceMgr;
+
+/*
+ * Do a quick check to see if the fence manager has registered the fence
+ * object as signaled. Note that this function may return a false negative
+ * answer.
+ */
+extern uint32_t driFenceSignaledTypeCached(struct _DriFenceObject *fence);
+
+/*
+ * Check if the fence object is signaled. This function can be substantially
+ * more expensive to call than the above function, but will not return a false
+ * negative answer. The argument "flush_type" sets the types that the
+ * underlying mechanism must make sure will eventually signal.
+ */
+extern int driFenceSignaledType(struct _DriFenceObject *fence,
+ uint32_t flush_type, uint32_t *signaled);
+
+/*
+ * Convenience functions.
+ */
+
+static inline int driFenceSignaled(struct _DriFenceObject *fence,
+ uint32_t flush_type)
+{
+ uint32_t signaled_types;
+ int ret = driFenceSignaledType(fence, flush_type, &signaled_types);
+ if (ret)
+ return 0;
+ return ((signaled_types & flush_type) == flush_type);
+}
+
+static inline int driFenceSignaledCached(struct _DriFenceObject *fence,
+ uint32_t flush_type)
+{
+ uint32_t signaled_types =
+ driFenceSignaledTypeCached(fence);
+
+ return ((signaled_types & flush_type) == flush_type);
+}
+
+/*
+ * Reference a fence object.
+ */
+extern struct _DriFenceObject *driFenceReference(struct _DriFenceObject *fence);
+
+/*
+ * Unreference a fence object. The fence object pointer will be reset to NULL.
+ */
+
+extern void driFenceUnReference(struct _DriFenceObject **pFence);
+
+
+/*
+ * Wait for a fence to signal the indicated fence_type.
+ * If "lazy_hint" is true, it indicates that the wait may sleep to avoid
+ * busy-wait polling.
+ */
+extern int driFenceFinish(struct _DriFenceObject *fence, uint32_t fence_type,
+ int lazy_hint);
+
+/*
+ * Create a DriFenceObject for manager "mgr".
+ *
+ * "private" is a pointer that should be used for the callbacks in
+ * struct _DriFenceMgrCreateInfo.
+ *
+ * If private_size is nonzero, then private_size bytes of the data stored
+ * at "private" will be copied, and the fence manager will instead use a
+ * pointer to the copied data for the callbacks in
+ * struct _DriFenceMgrCreateInfo. In that case, the object pointed to by
+ * "private" may be destroyed after the call to driFenceCreate.
+ */
+extern struct _DriFenceObject *driFenceCreate(struct _DriFenceMgr *mgr,
+ uint32_t fence_class,
+ uint32_t fence_type,
+ void *private,
+ size_t private_size);
+
+extern uint32_t driFenceType(struct _DriFenceObject *fence);
+
+/*
+ * Fence creations are ordered. If a fence signals a fence_type,
+ * it is safe to assume that all fences of the same class that were
+ * created before that fence have signaled the same type.
+ */
+
+#define DRI_FENCE_CLASS_ORDERED (1 << 0)
+
+struct _DriFenceMgrCreateInfo {
+ uint32_t flags;
+ uint32_t num_classes;
+ int (*signaled) (struct _DriFenceMgr *mgr, void *private, uint32_t flush_type,
+ uint32_t *signaled_type);
+ int (*finish) (struct _DriFenceMgr *mgr, void *private, uint32_t fence_type, int lazy_hint);
+ int (*unreference) (struct _DriFenceMgr *mgr, void **private);
+};
+
+extern struct _DriFenceMgr *
+driFenceMgrCreate(const struct _DriFenceMgrCreateInfo *info);
+
+void
+driFenceMgrUnReference(struct _DriFenceMgr **pMgr);
+
+extern struct _DriFenceMgr *
+driFenceMgrTTMInit(int fd);
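+
+/*
+ * Illustrative sketch (not part of this interface): creating and waiting
+ * on a fence through the TTM-backed manager. "fd" and "kFence" are
+ * assumed to come from the caller's DRM setup.
+ *
+ *   struct _DriFenceMgr *mgr = driFenceMgrTTMInit(fd);
+ *   struct _DriFenceObject *fence =
+ *      driFenceCreate(mgr, kFence->fence_class, kFence->type,
+ *                     kFence, sizeof(*kFence));
+ *
+ *   if (!driFenceSignaledCached(fence, driFenceType(fence)))
+ *      driFenceFinish(fence, driFenceType(fence), 1);
+ *
+ *   driFenceUnReference(&fence);
+ *   driFenceMgrUnReference(&mgr);
+ */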
+
+#endif
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include <xf86drm.h>
+#include <stdlib.h>
+#include <errno.h>
+#include "imports.h"
+#include "glthread.h"
+#include "ws_dri_bufpool.h"
+#include "ws_dri_bufmgr.h"
+#include "intel_screen.h"
+
+static void *
+pool_create(struct _DriBufferPool *pool,
+ unsigned long size, uint64_t flags, unsigned hint,
+ unsigned alignment)
+{
+   unsigned long *private = malloc(size + 2*sizeof(unsigned long));
+
+   if ((flags & DRM_BO_MASK_MEM) != DRM_BO_FLAG_MEM_LOCAL)
+      abort();
+
+   if (!private)
+      return NULL;
+
+   *private = size;
+   return (void *)private;
+}
+
+
+static int
+pool_destroy(struct _DriBufferPool *pool, void *private)
+{
+ free(private);
+ return 0;
+}
+
+static int
+pool_waitIdle(struct _DriBufferPool *pool, void *private,
+ _glthread_Mutex *mutex, int lazy)
+{
+ return 0;
+}
+
+static int
+pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
+ int hint, _glthread_Mutex *mutex, void **virtual)
+{
+ *virtual = (void *)((unsigned long *)private + 2);
+ return 0;
+}
+
+static int
+pool_unmap(struct _DriBufferPool *pool, void *private)
+{
+ return 0;
+}
+
+static unsigned long
+pool_offset(struct _DriBufferPool *pool, void *private)
+{
+ /*
+ * BUG
+ */
+ abort();
+ return 0UL;
+}
+
+static unsigned long
+pool_poolOffset(struct _DriBufferPool *pool, void *private)
+{
+ /*
+ * BUG
+ */
+   abort();
+   return 0UL;
+}
+
+static uint64_t
+pool_flags(struct _DriBufferPool *pool, void *private)
+{
+ return DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
+}
+
+static unsigned long
+pool_size(struct _DriBufferPool *pool, void *private)
+{
+ return *(unsigned long *) private;
+}
+
+
+static int
+pool_fence(struct _DriBufferPool *pool, void *private,
+ struct _DriFenceObject *fence)
+{
+ abort();
+   return 0;
+}
+
+static drmBO *
+pool_kernel(struct _DriBufferPool *pool, void *private)
+{
+ abort();
+ return NULL;
+}
+
+static void
+pool_takedown(struct _DriBufferPool *pool)
+{
+ free(pool);
+}
+
+
+struct _DriBufferPool *
+driMallocPoolInit(void)
+{
+ struct _DriBufferPool *pool;
+
+ pool = (struct _DriBufferPool *) malloc(sizeof(*pool));
+ if (!pool)
+ return NULL;
+
+ pool->data = NULL;
+ pool->fd = -1;
+ pool->map = &pool_map;
+ pool->unmap = &pool_unmap;
+ pool->destroy = &pool_destroy;
+ pool->offset = &pool_offset;
+ pool->poolOffset = &pool_poolOffset;
+ pool->flags = &pool_flags;
+ pool->size = &pool_size;
+ pool->create = &pool_create;
+ pool->fence = &pool_fence;
+ pool->kernel = &pool_kernel;
+ pool->validate = NULL;
+ pool->waitIdle = &pool_waitIdle;
+   pool->takeDown = &pool_takedown;
+   pool->reference = NULL;
+   pool->unreference = NULL;
+   pool->setStatus = NULL;
+ return pool;
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include <stdint.h>
+#include <sys/time.h>
+#include <errno.h>
+#include <unistd.h>
+#include <assert.h>
+#include "ws_dri_bufpool.h"
+#include "ws_dri_fencemgr.h"
+#include "ws_dri_bufmgr.h"
+#include "glthread.h"
+
+#define DRI_SLABPOOL_ALLOC_RETRIES 100
+
+struct _DriSlab;
+
+struct _DriSlabBuffer {
+ int isSlabBuffer;
+ drmBO *bo;
+ struct _DriFenceObject *fence;
+ struct _DriSlab *parent;
+ drmMMListHead head;
+ uint32_t mapCount;
+ uint32_t start;
+ uint32_t fenceType;
+ int unFenced;
+ _glthread_Cond event;
+};
+
+struct _DriKernelBO {
+ int fd;
+ drmBO bo;
+ drmMMListHead timeoutHead;
+ drmMMListHead head;
+ struct timeval timeFreed;
+ uint32_t pageAlignment;
+ void *virtual;
+};
+
+struct _DriSlab{
+ drmMMListHead head;
+ drmMMListHead freeBuffers;
+ uint32_t numBuffers;
+ uint32_t numFree;
+ struct _DriSlabBuffer *buffers;
+ struct _DriSlabSizeHeader *header;
+ struct _DriKernelBO *kbo;
+};
+
+
+struct _DriSlabSizeHeader {
+ drmMMListHead slabs;
+ drmMMListHead freeSlabs;
+ drmMMListHead delayedBuffers;
+ uint32_t numDelayed;
+ struct _DriSlabPool *slabPool;
+ uint32_t bufSize;
+ _glthread_Mutex mutex;
+};
+
+struct _DriFreeSlabManager {
+ struct timeval slabTimeout;
+ struct timeval checkInterval;
+ struct timeval nextCheck;
+ drmMMListHead timeoutList;
+ drmMMListHead unCached;
+ drmMMListHead cached;
+ _glthread_Mutex mutex;
+};
+
+
+struct _DriSlabPool {
+
+ /*
+ * The data of this structure remains constant after
+ * initialization and thus needs no mutex protection.
+ */
+
+ struct _DriFreeSlabManager *fMan;
+ uint64_t proposedFlags;
+ uint64_t validMask;
+ uint32_t *bucketSizes;
+ uint32_t numBuckets;
+ uint32_t pageSize;
+ int fd;
+ int pageAlignment;
+ int maxSlabSize;
+ int desiredNumBuffers;
+ struct _DriSlabSizeHeader *headers;
+};
+
+/*
+ * FIXME: Perhaps arrange timeout slabs in size buckets for fast
+ * retrieval??
+ */
+
+
+static inline int
+driTimeAfterEq(struct timeval *arg1, struct timeval *arg2)
+{
+ return ((arg1->tv_sec > arg2->tv_sec) ||
+ ((arg1->tv_sec == arg2->tv_sec) &&
+           (arg1->tv_usec >= arg2->tv_usec)));
+}
+
+static inline void
+driTimeAdd(struct timeval *arg, struct timeval *add)
+{
+ unsigned int sec;
+
+ arg->tv_sec += add->tv_sec;
+ arg->tv_usec += add->tv_usec;
+ sec = arg->tv_usec / 1000000;
+ arg->tv_sec += sec;
+ arg->tv_usec -= sec*1000000;
+}
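+
+/*
+ * Example: adding {1 s, 700000 us} and {0 s, 600000 us} gives
+ * 1300000 us, which normalizes to {2 s, 300000 us}.
+ */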
+
+static void
+driFreeKernelBO(struct _DriKernelBO *kbo)
+{
+ if (!kbo)
+ return;
+
+ (void) drmBOUnreference(kbo->fd, &kbo->bo);
+ free(kbo);
+}
+
+
+static void
+driFreeTimeoutKBOsLocked(struct _DriFreeSlabManager *fMan,
+ struct timeval *time)
+{
+ drmMMListHead *list, *next;
+ struct _DriKernelBO *kbo;
+
+ if (!driTimeAfterEq(time, &fMan->nextCheck))
+ return;
+
+ for (list = fMan->timeoutList.next, next = list->next;
+ list != &fMan->timeoutList;
+ list = next, next = list->next) {
+
+ kbo = DRMLISTENTRY(struct _DriKernelBO, list, timeoutHead);
+
+ if (!driTimeAfterEq(time, &kbo->timeFreed))
+ break;
+
+ DRMLISTDELINIT(&kbo->timeoutHead);
+ DRMLISTDELINIT(&kbo->head);
+ driFreeKernelBO(kbo);
+ }
+
+ fMan->nextCheck = *time;
+ driTimeAdd(&fMan->nextCheck, &fMan->checkInterval);
+}
+
+
+/*
+ * Add a _DriKernelBO to the free slab manager.
+ * This means that it is available for reuse, but if it's not
+ * reused in a while, it will be freed.
+ */
+
+static void
+driSetKernelBOFree(struct _DriFreeSlabManager *fMan,
+ struct _DriKernelBO *kbo)
+{
+ struct timeval time;
+
+ _glthread_LOCK_MUTEX(fMan->mutex);
+ gettimeofday(&time, NULL);
+ driTimeAdd(&time, &fMan->slabTimeout);
+
+ kbo->timeFreed = time;
+
+ if (kbo->bo.flags & DRM_BO_FLAG_CACHED)
+ DRMLISTADD(&kbo->head, &fMan->cached);
+ else
+ DRMLISTADD(&kbo->head, &fMan->unCached);
+
+ DRMLISTADDTAIL(&kbo->timeoutHead, &fMan->timeoutList);
+ driFreeTimeoutKBOsLocked(fMan, &time);
+
+ _glthread_UNLOCK_MUTEX(fMan->mutex);
+}
+
+/*
+ * Get a _DriKernelBO for us to use as storage for a slab.
+ *
+ */
+
+static struct _DriKernelBO *
+driAllocKernelBO(struct _DriSlabSizeHeader *header)
+
+{
+ struct _DriSlabPool *slabPool = header->slabPool;
+ struct _DriFreeSlabManager *fMan = slabPool->fMan;
+ drmMMListHead *list, *next, *head;
+ uint32_t size = header->bufSize * slabPool->desiredNumBuffers;
+ struct _DriKernelBO *kbo;
+ int ret;
+
+ /*
+ * FIXME: We should perhaps allow some variation in slabsize in order
+ * to efficiently reuse slabs.
+ */
+
+ size = (size <= slabPool->maxSlabSize) ? size : slabPool->maxSlabSize;
+ size = (size + slabPool->pageSize - 1) & ~(slabPool->pageSize - 1);
+ _glthread_LOCK_MUTEX(fMan->mutex);
+
+ kbo = NULL;
+
+ retry:
+ head = (slabPool->proposedFlags & DRM_BO_FLAG_CACHED) ?
+ &fMan->cached : &fMan->unCached;
+
+ for (list = head->next, next = list->next;
+ list != head;
+ list = next, next = list->next) {
+
+ kbo = DRMLISTENTRY(struct _DriKernelBO, list, head);
+
+ if ((kbo->bo.size == size) &&
+ (slabPool->pageAlignment == 0 ||
+ (kbo->pageAlignment % slabPool->pageAlignment) == 0)) {
+
+ DRMLISTDELINIT(&kbo->head);
+ DRMLISTDELINIT(&kbo->timeoutHead);
+ break;
+ }
+
+ kbo = NULL;
+ }
+
+ _glthread_UNLOCK_MUTEX(fMan->mutex);
+
+ if (kbo) {
+ ret = drmBOSetStatus(kbo->fd, &kbo->bo, slabPool->proposedFlags,
+ (slabPool->proposedFlags ^ kbo->bo.flags),
+ DRM_BO_HINT_DONT_FENCE, 0, 0);
+ if (ret == 0)
+ return kbo;
+
+ driFreeKernelBO(kbo);
+ kbo = NULL;
+ goto retry;
+ }
+
+ kbo = calloc(1, sizeof(struct _DriKernelBO));
+ if (!kbo)
+ return NULL;
+
+   kbo->fd = slabPool->fd;
+   kbo->pageAlignment = slabPool->pageAlignment;
+   DRMINITLISTHEAD(&kbo->head);
+   DRMINITLISTHEAD(&kbo->timeoutHead);
+
+ ret = drmBOCreate(kbo->fd, size, slabPool->pageAlignment, NULL,
+ slabPool->proposedFlags,
+ DRM_BO_HINT_DONT_FENCE, &kbo->bo);
+ if (ret)
+ goto out_err0;
+
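+   /*
+    * Map and immediately unmap: this caches kbo->virtual, which slab
+    * buffers dereference directly later on.
+    */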
+ ret = drmBOMap(kbo->fd, &kbo->bo,
+ DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
+ 0, &kbo->virtual);
+
+ if (ret)
+ goto out_err1;
+
+ ret = drmBOUnmap(kbo->fd, &kbo->bo);
+ if (ret)
+ goto out_err1;
+
+ return kbo;
+
+ out_err1:
+ drmBOUnreference(kbo->fd, &kbo->bo);
+ out_err0:
+ free(kbo);
+ return NULL;
+}
+
+
+static int
+driAllocSlab(struct _DriSlabSizeHeader *header)
+{
+ struct _DriSlab *slab;
+ struct _DriSlabBuffer *buf;
+ uint32_t numBuffers;
+ int ret;
+ int i;
+
+ slab = calloc(1, sizeof(*slab));
+ if (!slab)
+ return -ENOMEM;
+
+ slab->kbo = driAllocKernelBO(header);
+ if (!slab->kbo) {
+ ret = -ENOMEM;
+ goto out_err0;
+ }
+
+ numBuffers = slab->kbo->bo.size / header->bufSize;
+
+ slab->buffers = calloc(numBuffers, sizeof(*slab->buffers));
+ if (!slab->buffers) {
+ ret = -ENOMEM;
+ goto out_err1;
+ }
+
+ DRMINITLISTHEAD(&slab->head);
+ DRMINITLISTHEAD(&slab->freeBuffers);
+ slab->numBuffers = numBuffers;
+ slab->numFree = 0;
+ slab->header = header;
+
+ buf = slab->buffers;
+ for (i=0; i < numBuffers; ++i) {
+ buf->parent = slab;
+ buf->start = i* header->bufSize;
+ buf->mapCount = 0;
+ buf->isSlabBuffer = 1;
+ _glthread_INIT_COND(buf->event);
+ DRMLISTADDTAIL(&buf->head, &slab->freeBuffers);
+ slab->numFree++;
+ buf++;
+ }
+
+ DRMLISTADDTAIL(&slab->head, &header->slabs);
+
+ return 0;
+
+ out_err1:
+ driSetKernelBOFree(header->slabPool->fMan, slab->kbo);
+ free(slab->buffers);
+ out_err0:
+ free(slab);
+ return ret;
+}
+
+/*
+ * Delete a buffer from the slab header delayed list and put
+ * it on the slab free list.
+ */
+
+static void
+driSlabFreeBufferLocked(struct _DriSlabBuffer *buf)
+{
+ struct _DriSlab *slab = buf->parent;
+ struct _DriSlabSizeHeader *header = slab->header;
+ drmMMListHead *list = &buf->head;
+
+ DRMLISTDEL(list);
+ DRMLISTADDTAIL(list, &slab->freeBuffers);
+ slab->numFree++;
+
+ if (slab->head.next == &slab->head)
+ DRMLISTADDTAIL(&slab->head, &header->slabs);
+
+ if (slab->numFree == slab->numBuffers) {
+ list = &slab->head;
+ DRMLISTDEL(list);
+ DRMLISTADDTAIL(list, &header->freeSlabs);
+ }
+
+ if (header->slabs.next == &header->slabs ||
+ slab->numFree != slab->numBuffers) {
+
+ drmMMListHead *next;
+ struct _DriFreeSlabManager *fMan = header->slabPool->fMan;
+
+ for (list = header->freeSlabs.next, next = list->next;
+ list != &header->freeSlabs;
+ list = next, next = list->next) {
+
+ slab = DRMLISTENTRY(struct _DriSlab, list, head);
+
+ DRMLISTDELINIT(list);
+ driSetKernelBOFree(fMan, slab->kbo);
+ free(slab->buffers);
+ free(slab);
+ }
+ }
+}
+
+static void
+driSlabCheckFreeLocked(struct _DriSlabSizeHeader *header, int wait)
+{
+ drmMMListHead *list, *prev;
+ struct _DriSlabBuffer *buf;
+ struct _DriSlab *slab;
+
+ int signaled = 0;
+ int i;
+ int ret;
+
+ list = header->delayedBuffers.next;
+
+ /* Only examine the oldest 1/3 of delayed buffers:
+ */
+ if (header->numDelayed > 3) {
+ for (i = 0; i < header->numDelayed; i += 3) {
+ list = list->next;
+ }
+ }
+
+ prev = list->prev;
+ for (; list != &header->delayedBuffers; list = prev, prev = list->prev) {
+ buf = DRMLISTENTRY(struct _DriSlabBuffer, list, head);
+ slab = buf->parent;
+
+ if (!signaled) {
+ if (wait) {
+ ret = driFenceFinish(buf->fence, buf->fenceType, 0);
+ if (ret)
+ break;
+ signaled = 1;
+ } else {
+ signaled = driFenceSignaled(buf->fence, buf->fenceType);
+ }
+ if (signaled) {
+ driFenceUnReference(&buf->fence);
+ header->numDelayed--;
+ driSlabFreeBufferLocked(buf);
+ }
+ } else if (driFenceSignaledCached(buf->fence, buf->fenceType)) {
+ driFenceUnReference(&buf->fence);
+ header->numDelayed--;
+ driSlabFreeBufferLocked(buf);
+ }
+ }
+}
+
+
+static struct _DriSlabBuffer *
+driSlabAllocBuffer(struct _DriSlabSizeHeader *header)
+{
+   struct _DriSlabBuffer *buf;
+ struct _DriSlab *slab;
+ drmMMListHead *list;
+ int count = DRI_SLABPOOL_ALLOC_RETRIES;
+
+ _glthread_LOCK_MUTEX(header->mutex);
+ while(header->slabs.next == &header->slabs && count > 0) {
+ driSlabCheckFreeLocked(header, 0);
+ if (header->slabs.next != &header->slabs)
+ break;
+
+ _glthread_UNLOCK_MUTEX(header->mutex);
+ if (count != DRI_SLABPOOL_ALLOC_RETRIES)
+ usleep(1);
+ (void) driAllocSlab(header);
+ _glthread_LOCK_MUTEX(header->mutex);
+ count--;
+ }
+
+ list = header->slabs.next;
+ if (list == &header->slabs) {
+ _glthread_UNLOCK_MUTEX(header->mutex);
+      /* No slab with free buffers could be allocated. */
+ return NULL;
+ }
+ slab = DRMLISTENTRY(struct _DriSlab, list, head);
+ if (--slab->numFree == 0)
+ DRMLISTDELINIT(list);
+
+ list = slab->freeBuffers.next;
+ DRMLISTDELINIT(list);
+
+ _glthread_UNLOCK_MUTEX(header->mutex);
+ buf = DRMLISTENTRY(struct _DriSlabBuffer, list, head);
+ return buf;
+}
+
+static void *
+pool_create(struct _DriBufferPool *driPool, unsigned long size,
+ uint64_t flags, unsigned hint, unsigned alignment)
+{
+ struct _DriSlabPool *pool = (struct _DriSlabPool *) driPool->data;
+ struct _DriSlabSizeHeader *header;
+ struct _DriSlabBuffer *buf;
+ void *dummy;
+ int i;
+ int ret;
+
+
+ /*
+ * FIXME: Check for compatibility.
+ */
+
+ header = pool->headers;
+ for (i=0; i<pool->numBuckets; ++i) {
+ if (header->bufSize >= size)
+ break;
+ header++;
+ }
+
+ if (i < pool->numBuckets)
+ return driSlabAllocBuffer(header);
+
+
+ /*
+ * Fall back to allocate a buffer object directly from DRM.
+ * and wrap it in a driBO structure.
+ */
+
+
+ buf = calloc(1, sizeof(*buf));
+
+ if (!buf)
+ return NULL;
+
+ buf->bo = calloc(1, sizeof(*buf->bo));
+ if (!buf->bo)
+ goto out_err0;
+
+ if (alignment) {
+ if ((alignment < pool->pageSize) && (pool->pageSize % alignment))
+ goto out_err1;
+ if ((alignment > pool->pageSize) && (alignment % pool->pageSize))
+ goto out_err1;
+ }
+
+ ret = drmBOCreate(pool->fd, size, alignment / pool->pageSize, NULL,
+ flags, hint, buf->bo);
+ if (ret)
+ goto out_err1;
+
+ ret = drmBOMap(pool->fd, buf->bo, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
+ 0, &dummy);
+ if (ret)
+ goto out_err2;
+
+ ret = drmBOUnmap(pool->fd, buf->bo);
+ if (ret)
+ goto out_err2;
+
+ return buf;
+ out_err2:
+ drmBOUnreference(pool->fd, buf->bo);
+ out_err1:
+ free(buf->bo);
+ out_err0:
+ free(buf);
+ return NULL;
+}
+
+static int
+pool_destroy(struct _DriBufferPool *driPool, void *private)
+{
+ struct _DriSlabBuffer *buf =
+ (struct _DriSlabBuffer *) private;
+ struct _DriSlab *slab;
+ struct _DriSlabSizeHeader *header;
+
+ if (!buf->isSlabBuffer) {
+ struct _DriSlabPool *pool = (struct _DriSlabPool *) driPool->data;
+ int ret;
+
+ ret = drmBOUnreference(pool->fd, buf->bo);
+ free(buf->bo);
+ free(buf);
+ return ret;
+ }
+
+ slab = buf->parent;
+ header = slab->header;
+
+ _glthread_LOCK_MUTEX(header->mutex);
+ buf->unFenced = 0;
+ buf->mapCount = 0;
+
+ if (buf->fence && !driFenceSignaledCached(buf->fence, buf->fenceType)) {
+ DRMLISTADDTAIL(&buf->head, &header->delayedBuffers);
+ header->numDelayed++;
+ } else {
+ driSlabFreeBufferLocked(buf);
+ }
+
+ _glthread_UNLOCK_MUTEX(header->mutex);
+ return 0;
+}
+
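+/*
+ * Block until the buffer is idle: first wait for any pending validation
+ * (unFenced) to complete, then wait on and drop the buffer's fence.
+ */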
+static int
+pool_waitIdle(struct _DriBufferPool *driPool, void *private,
+ _glthread_Mutex *mutex, int lazy)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+
+ while (buf->unFenced)
+ _glthread_COND_WAIT(buf->event, *mutex);
+
+ if (!buf->fence)
+ return 0;
+
+ driFenceFinish(buf->fence, buf->fenceType, lazy);
+ driFenceUnReference(&buf->fence);
+
+ return 0;
+}
+
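+/*
+ * Map a buffer for CPU access. A buffer counts as busy while it is
+ * being validated or while its fence has not signaled; in that case we
+ * either fail with -EBUSY (DRM_BO_HINT_DONT_BLOCK) or wait for idle.
+ * Slab buffers map into their parent slab's kernel BO at their start
+ * offset.
+ */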
+static int
+pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
+ int hint, _glthread_Mutex *mutex, void **virtual)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+ int busy;
+
+ if (buf->isSlabBuffer)
+ busy = buf->unFenced || (buf->fence && !driFenceSignaledCached(buf->fence, buf->fenceType));
+ else
+ busy = buf->fence && !driFenceSignaled(buf->fence, buf->fenceType);
+
+ if (busy) {
+ if (hint & DRM_BO_HINT_DONT_BLOCK)
+ return -EBUSY;
+ (void) pool_waitIdle(pool, private, mutex, 0);
+ }
+
+ ++buf->mapCount;
+ *virtual = (buf->isSlabBuffer) ?
+ (void *) ((uint8_t *) buf->parent->kbo->virtual + buf->start) :
+ (void *) buf->bo->virtual;
+
+ return 0;
+}
+
+static int
+pool_unmap(struct _DriBufferPool *pool, void *private)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+
+ --buf->mapCount;
+ if (buf->mapCount == 0 && buf->isSlabBuffer)
+ _glthread_COND_BROADCAST(buf->event);
+
+ return 0;
+}
+
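+/*
+ * Return the buffer's offset in card address space. This is only
+ * meaningful for pinned (DRM_BO_FLAG_NO_MOVE) buffers, hence the
+ * asserts below.
+ */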
+static unsigned long
+pool_offset(struct _DriBufferPool *pool, void *private)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+ struct _DriSlab *slab;
+ struct _DriSlabSizeHeader *header;
+
+ if (!buf->isSlabBuffer) {
+ assert(buf->bo->proposedFlags & DRM_BO_FLAG_NO_MOVE);
+ return buf->bo->offset;
+ }
+
+ slab = buf->parent;
+ header = slab->header;
+
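+ /* header is only used by the assert below; the (void) cast keeps
+ * NDEBUG builds free of unused-variable warnings. */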
+ (void) header;
+ assert(header->slabPool->proposedFlags & DRM_BO_FLAG_NO_MOVE);
+ return slab->kbo->bo.offset + buf->start;
+}
+
+static unsigned long
+pool_poolOffset(struct _DriBufferPool *pool, void *private)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+
+ return buf->start;
+}
+
+static uint64_t
+pool_flags(struct _DriBufferPool *pool, void *private)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+
+ if (!buf->isSlabBuffer)
+ return buf->bo->flags;
+
+ return buf->parent->kbo->bo.flags;
+}
+
+static unsigned long
+pool_size(struct _DriBufferPool *pool, void *private)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+ if (!buf->isSlabBuffer)
+ return buf->bo->size;
+
+ return buf->parent->header->bufSize;
+}
+
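+/*
+ * Attach a fence to the buffer after command submission, replacing any
+ * previous fence, and wake up threads waiting for validation to end.
+ */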
+static int
+pool_fence(struct _DriBufferPool *pool, void *private,
+ struct _DriFenceObject *fence)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+ drmBO *bo;
+
+ if (buf->fence)
+ driFenceUnReference(&buf->fence);
+
+ buf->fence = driFenceReference(fence);
+ bo = (buf->isSlabBuffer) ? &buf->parent->kbo->bo : buf->bo;
+ buf->fenceType = bo->fenceFlags;
+
+ buf->unFenced = 0;
+ _glthread_COND_BROADCAST(buf->event);
+
+ return 0;
+}
+
+static drmBO *
+pool_kernel(struct _DriBufferPool *pool, void *private)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+
+ return (buf->isSlabBuffer) ? &buf->parent->kbo->bo : buf->bo;
+}
+
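+/*
+ * Prepare the buffer for validation: wait until all CPU maps are gone,
+ * then mark the buffer unFenced until pool_fence() is called.
+ */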
+static int
+pool_validate(struct _DriBufferPool *pool, void *private,
+ _glthread_Mutex *mutex)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+
+ if (!buf->isSlabBuffer)
+ return 0;
+
+ while(buf->mapCount != 0)
+ _glthread_COND_WAIT(buf->event, *mutex);
+
+ buf->unFenced = 1;
+ return 0;
+}
+
+
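+/*
+ * Create a manager that periodically frees slabs whose buffers have all
+ * been unused for longer than slabTimeoutMsec, checking at most every
+ * checkIntervalMsec. For example (illustrative values only):
+ *
+ *   fMan = driInitFreeSlabManager(250, 1000);
+ *
+ * checks roughly every 250 ms and frees slabs idle for about a second.
+ */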
+struct _DriFreeSlabManager *
+driInitFreeSlabManager(uint32_t checkIntervalMsec, uint32_t slabTimeoutMsec)
+{
+ struct _DriFreeSlabManager *tmp;
+
+ tmp = calloc(1, sizeof(*tmp));
+ if (!tmp)
+ return NULL;
+
+ _glthread_INIT_MUTEX(tmp->mutex);
+ _glthread_LOCK_MUTEX(tmp->mutex);
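+ /* Convert the millisecond arguments to struct timeval (secs + usecs). */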
+ tmp->slabTimeout.tv_usec = slabTimeoutMsec*1000;
+ tmp->slabTimeout.tv_sec = tmp->slabTimeout.tv_usec / 1000000;
+ tmp->slabTimeout.tv_usec -= tmp->slabTimeout.tv_sec*1000000;
+
+ tmp->checkInterval.tv_usec = checkIntervalMsec*1000;
+ tmp->checkInterval.tv_sec = tmp->checkInterval.tv_usec / 1000000;
+ tmp->checkInterval.tv_usec -= tmp->checkInterval.tv_sec*1000000;
+
+ gettimeofday(&tmp->nextCheck, NULL);
+ driTimeAdd(&tmp->nextCheck, &tmp->checkInterval);
+ DRMINITLISTHEAD(&tmp->timeoutList);
+ DRMINITLISTHEAD(&tmp->unCached);
+ DRMINITLISTHEAD(&tmp->cached);
+ _glthread_UNLOCK_MUTEX(tmp->mutex);
+
+ return tmp;
+}
+
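+/*
+ * Tear down the manager: flush the timeout list by pretending the next
+ * check interval has already passed, then verify that no kernel BOs
+ * remain on any list before freeing the manager itself.
+ */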
+void
+driFinishFreeSlabManager(struct _DriFreeSlabManager *fMan)
+{
+ struct timeval time;
+
+ time = fMan->nextCheck;
+ driTimeAdd(&time, &fMan->checkInterval);
+
+ _glthread_LOCK_MUTEX(fMan->mutex);
+ driFreeTimeoutKBOsLocked(fMan, &time);
+ _glthread_UNLOCK_MUTEX(fMan->mutex);
+
+ assert(fMan->timeoutList.next == &fMan->timeoutList);
+ assert(fMan->unCached.next == &fMan->unCached);
+ assert(fMan->cached.next == &fMan->cached);
+
+ free(fMan);
+}
+
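+/*
+ * Initialize the per-bucket header that tracks partially free slabs,
+ * completely free slabs, and buffers whose destruction is delayed on a
+ * fence.
+ */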
+static void
+driInitSizeHeader(struct _DriSlabPool *pool, uint32_t size,
+ struct _DriSlabSizeHeader *header)
+{
+ _glthread_INIT_MUTEX(header->mutex);
+ _glthread_LOCK_MUTEX(header->mutex);
+
+ DRMINITLISTHEAD(&header->slabs);
+ DRMINITLISTHEAD(&header->freeSlabs);
+ DRMINITLISTHEAD(&header->delayedBuffers);
+
+ header->numDelayed = 0;
+ header->slabPool = pool;
+ header->bufSize = size;
+
+ _glthread_UNLOCK_MUTEX(header->mutex);
+}
+
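+/*
+ * Drain a bucket header at pool takedown: force-finish the fence of
+ * every delayed buffer and free it.
+ */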
+static void
+driFinishSizeHeader(struct _DriSlabSizeHeader *header)
+{
+ drmMMListHead *list, *next;
+ struct _DriSlabBuffer *buf;
+
+ _glthread_LOCK_MUTEX(header->mutex);
+ for (list = header->delayedBuffers.next, next = list->next;
+ list != &header->delayedBuffers;
+ list = next, next = list->next) {
+
+ buf = DRMLISTENTRY(struct _DriSlabBuffer, list, head);
+ if (buf->fence) {
+ (void) driFenceFinish(buf->fence, buf->fenceType, 0);
+ driFenceUnReference(&buf->fence);
+ }
+ header->numDelayed--;
+ driSlabFreeBufferLocked(buf);
+ }
+ _glthread_UNLOCK_MUTEX(header->mutex);
+}
+
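+/*
+ * Destroy the pool: drain each bucket's delayed-free list, then release
+ * the per-pool bookkeeping. Slab kernel BOs are returned to the free
+ * slab manager as their buffers are freed.
+ */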
+static void
+pool_takedown(struct _DriBufferPool *driPool)
+{
+ struct _DriSlabPool *pool = driPool->data;
+ int i;
+
+ for (i = 0; i < pool->numBuckets; ++i) {
+ driFinishSizeHeader(&pool->headers[i]);
+ }
+
+ free(pool->headers);
+ free(pool->bucketSizes);
+ free(pool);
+ free(driPool);
+}
+
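+/*
+ * Create a slab pool with numSizes buckets whose sizes are powers of
+ * two starting at smallestSize. A hypothetical setup with four buckets
+ * from 4 KiB to 32 KiB might look like (illustrative flags only):
+ *
+ *   pool = driSlabPoolInit(fd,
+ *                          DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
+ *                          DRM_BO_FLAG_MEM_TT,
+ *                          DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
+ *                          DRM_BO_FLAG_MEM_TT,
+ *                          4096, 4, 64, 32 * 4096, 0, fMan);
+ */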
+struct _DriBufferPool *
+driSlabPoolInit(int fd, uint64_t flags,
+ uint64_t validMask,
+ uint32_t smallestSize,
+ uint32_t numSizes,
+ uint32_t desiredNumBuffers,
+ uint32_t maxSlabSize,
+ uint32_t pageAlignment,
+ struct _DriFreeSlabManager *fMan)
+{
+ struct _DriBufferPool *driPool;
+ struct _DriSlabPool *pool;
+ uint32_t i;
+
+ driPool = calloc(1, sizeof(*driPool));
+ if (!driPool)
+ return NULL;
+
+ pool = calloc(1, sizeof(*pool));
+ if (!pool)
+ goto out_err0;
+
+ pool->bucketSizes = calloc(numSizes, sizeof(*pool->bucketSizes));
+ if (!pool->bucketSizes)
+ goto out_err1;
+
+ pool->headers = calloc(numSizes, sizeof(*pool->headers));
+ if (!pool->headers)
+ goto out_err2;
+
+ pool->fMan = fMan;
+ pool->proposedFlags = flags;
+ pool->validMask = validMask;
+ pool->numBuckets = numSizes;
+ pool->pageSize = getpagesize();
+ pool->fd = fd;
+ pool->pageAlignment = pageAlignment;
+ pool->maxSlabSize = maxSlabSize;
+ pool->desiredNumBuffers = desiredNumBuffers;
+
+ for (i = 0; i < pool->numBuckets; ++i) {
+ pool->bucketSizes[i] = (smallestSize << i);
+ driInitSizeHeader(pool, pool->bucketSizes[i],
+ &pool->headers[i]);
+ }
+
+ driPool->data = (void *) pool;
+ driPool->map = &pool_map;
+ driPool->unmap = &pool_unmap;
+ driPool->destroy = &pool_destroy;
+ driPool->offset = &pool_offset;
+ driPool->poolOffset = &pool_poolOffset;
+ driPool->flags = &pool_flags;
+ driPool->size = &pool_size;
+ driPool->create = &pool_create;
+ driPool->fence = &pool_fence;
+ driPool->kernel = &pool_kernel;
+ driPool->validate = &pool_validate;
+ driPool->waitIdle = &pool_waitIdle;
+ driPool->takeDown = &pool_takedown;
+
+ return driPool;
+
+ out_err2:
+ free(pool->bucketSizes);
+ out_err1:
+ free(pool);
+ out_err0:
+ free(driPool);
+
+ return NULL;
+}