GALLIUM_AUXILIARY_DIRS = draw cso_cache pipebuffer tgsi sct translate rtasm util
GALLIUM_AUXILIARIES = $(foreach DIR,$(GALLIUM_AUXILIARY_DIRS),$(TOP)/src/gallium/auxiliary/$(DIR)/lib$(DIR).a)
GALLIUM_DRIVER_DIRS = softpipe i915simple i965simple failover
+GALLIUM_WINSYS_COMMON_DIRS = intel_drm
GALLIUM_DRIVERS = $(foreach DIR,$(GALLIUM_DRIVER_DIRS),$(TOP)/src/gallium/drivers/$(DIR)/lib$(DIR).a)
GALLIUM_WINSYS_DIRS = xlib egl_xlib
include $(TOP)/configs/current
-SUBDIRS = auxiliary drivers
+SUBDIRS = auxiliary drivers winsys/common
default: subdirs
--- /dev/null
+TOP = ../../../..
+include $(TOP)/configs/current
+
+
+SUBDIRS = $(GALLIUM_WINSYS_COMMON_DIRS)
+
+
+default: subdirs
+
+
+subdirs:
+ @for dir in $(SUBDIRS) ; do \
+ if [ -d $$dir ] ; then \
+ (cd $$dir && $(MAKE)) || exit 1 ; \
+ fi \
+ done
+
+
+clean:
+ rm -f `find . -name \*.[oa]`
--- /dev/null
+# -*-makefile-*-
+
+
+# We still have a dependency on the "dri" buffer manager. Most likely
+# the interface can be reused in non-dri environments, and also as a
+# frontend to simpler memory managers.
+#
+COMMON_SOURCES =
+
+OBJECTS = $(C_SOURCES:.c=.o) \
+ $(CPP_SOURCES:.cpp=.o) \
+ $(ASM_SOURCES:.S=.o)
+
+
+### Include directories
+INCLUDES = \
+ -I. \
+ -I$(TOP)/src/gallium/include \
+ -I$(TOP)/src/gallium/auxiliary \
+ -I$(TOP)/src/gallium/drivers \
+ -I$(TOP)/include \
+ $(DRIVER_INCLUDES)
+
+
+##### RULES #####
+
+.c.o:
+ $(CC) -c $(INCLUDES) $(CFLAGS) $(DRIVER_DEFINES) $< -o $@
+
+.cpp.o:
+ $(CXX) -c $(INCLUDES) $(CXXFLAGS) $(DRIVER_DEFINES) $< -o $@
+
+.S.o:
+ $(CC) -c $(INCLUDES) $(CFLAGS) $(DRIVER_DEFINES) $< -o $@
+
+
+##### TARGETS #####
+
+default: depend symlinks $(LIBNAME)
+
+
+$(LIBNAME): $(OBJECTS) Makefile $(TOP)/src/gallium/winsys/common/Makefile.template
+ $(TOP)/bin/mklib -o $@ -static $(OBJECTS) $(DRIVER_LIBS)
+
+
+depend: $(C_SOURCES) $(CPP_SOURCES) $(ASM_SOURCES) $(SYMLINKS)
+ rm -f depend
+ touch depend
+ $(MKDEP) $(MKDEP_OPTIONS) $(DRIVER_DEFINES) $(INCLUDES) $(C_SOURCES) $(CPP_SOURCES) \
+ $(ASM_SOURCES) 2> /dev/null
+
+
+# Emacs tags
+tags:
+ etags `find . -name \*.[ch]` `find ../include`
+
+
+# Remove .o and backup files
+clean::
+ -rm -f *.o */*.o *~ *.so server/*.o $(SYMLINKS)
+ -rm -f depend depend.bak
+
+
+include depend
--- /dev/null
+TOP = ../../../../..
+include $(TOP)/configs/current
+
+LIBNAME = inteldrm
+
+C_SOURCES = \
+ intel_be_batchbuffer.c \
+ intel_be_context.c \
+ intel_be_device.c \
+ ws_dri_bufmgr.c \
+ ws_dri_drmpool.c \
+ ws_dri_fencemgr.c \
+ ws_dri_mallocpool.c \
+ ws_dri_slabpool.c
+
+
+include ../Makefile.template
+
+DRIVER_DEFINES = $(shell pkg-config libdrm --cflags \
+ && pkg-config libdrm --atleast-version=2.3.1 \
+ && echo "-DDRM_VBLANK_FLIP=DRM_VBLANK_FLIP")
+symlinks:
+
--- /dev/null
+/*
+ * Mesa 3-D graphics library
+ * Version: 6.5.2
+ *
+ * Copyright (C) 1999-2006 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*
+ * Thread support for gl dispatch.
+ *
+ * Initial version by John Stone (j.stone@acm.org) (johns@cs.umr.edu)
+ * and Christoph Poliwoda (poliwoda@volumegraphics.com)
+ * Revised by Keith Whitwell
+ * Adapted for new gl dispatcher by Brian Paul
+ *
+ *
+ *
+ * DOCUMENTATION
+ *
+ * This thread module exports the following types:
+ * _glthread_TSD Thread-specific data area
+ * _glthread_Thread Thread datatype
+ * _glthread_Mutex Mutual exclusion lock
+ *
+ * Macros:
+ * _glthread_DECLARE_STATIC_MUTEX(name) Declare a non-local mutex
+ * _glthread_INIT_MUTEX(name) Initialize a mutex
+ * _glthread_LOCK_MUTEX(name) Lock a mutex
+ * _glthread_UNLOCK_MUTEX(name) Unlock a mutex
+ *
+ * Functions:
+ * _glthread_GetID(v) Get integer thread ID
+ * _glthread_InitTSD() Initialize thread-specific data
+ * _glthread_GetTSD() Get thread-specific data
+ * _glthread_SetTSD() Set thread-specific data
+ *
+ */
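+
+/*
+ * Illustrative usage sketch (not part of this header's API; the names
+ * "ctx_tsd" and "ctx_mutex" below are hypothetical):
+ *
+ *    static _glthread_TSD ctx_tsd;
+ *    _glthread_DECLARE_STATIC_MUTEX(ctx_mutex);
+ *
+ *    void set_current(void *ctx)
+ *    {
+ *       _glthread_LOCK_MUTEX(ctx_mutex);
+ *       _glthread_SetTSD(&ctx_tsd, ctx);
+ *       _glthread_UNLOCK_MUTEX(ctx_mutex);
+ *    }
+ *
+ *    void *get_current(void)
+ *    {
+ *       return _glthread_GetTSD(&ctx_tsd);
+ *    }
+ *
+ * _glthread_InitTSD(&ctx_tsd) must be called once before the key is used.
+ */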
+
+/*
+ * If this file is accidentally included by a non-threaded build,
+ * it should not cause the build to fail, or otherwise cause problems.
+ * In general, however, it should only be included when needed.
+ */
+
+#ifndef GLTHREAD_H
+#define GLTHREAD_H
+
+
+#if defined(USE_MGL_NAMESPACE)
+#define _glapi_Dispatch _mglapi_Dispatch
+#endif
+
+
+
+#if (defined(PTHREADS) || defined(SOLARIS_THREADS) ||\
+ defined(WIN32_THREADS) || defined(USE_XTHREADS) || defined(BEOS_THREADS)) \
+ && !defined(THREADS)
+# define THREADS
+#endif
+
+#ifdef VMS
+#include <GL/vms_x_fix.h>
+#endif
+
+/*
+ * POSIX threads. This should be your choice in the Unix world
+ * whenever possible. When building with POSIX threads, be sure
+ * to enable any compiler flags which will cause the MT-safe
+ * libc (if one exists) to be used when linking, as well as any
+ * header macros for MT-safe errno, etc. For Solaris, this is the -mt
+ * compiler flag. On Solaris with gcc, use -D_REENTRANT to enable
+ * proper compiling for MT-safe libc etc.
+ */
+#if defined(PTHREADS)
+#include <pthread.h> /* POSIX threads headers */
+
+typedef struct {
+ pthread_key_t key;
+ int initMagic;
+} _glthread_TSD;
+
+typedef pthread_t _glthread_Thread;
+
+typedef pthread_mutex_t _glthread_Mutex;
+
+#define _glthread_DECLARE_STATIC_MUTEX(name) \
+ static _glthread_Mutex name = PTHREAD_MUTEX_INITIALIZER
+
+#define _glthread_INIT_MUTEX(name) \
+ pthread_mutex_init(&(name), NULL)
+
+#define _glthread_DESTROY_MUTEX(name) \
+ pthread_mutex_destroy(&(name))
+
+#define _glthread_LOCK_MUTEX(name) \
+ (void) pthread_mutex_lock(&(name))
+
+#define _glthread_UNLOCK_MUTEX(name) \
+ (void) pthread_mutex_unlock(&(name))
+
+typedef pthread_cond_t _glthread_Cond;
+
+#define _glthread_DECLARE_STATIC_COND(name) \
+ static _glthread_Cond name = PTHREAD_COND_INITIALIZER
+
+#define _glthread_INIT_COND(cond) \
+ pthread_cond_init(&(cond), NULL)
+
+#define _glthread_DESTROY_COND(name) \
+ pthread_cond_destroy(&(name))
+
+#define _glthread_COND_WAIT(cond, mutex) \
+ pthread_cond_wait(&(cond), &(mutex))
+
+#define _glthread_COND_SIGNAL(cond) \
+ pthread_cond_signal(&(cond))
+
+#define _glthread_COND_BROADCAST(cond) \
+ pthread_cond_broadcast(&(cond))
+
+
+#else /* PTHREADS */
+
+typedef unsigned int _glthread_Cond;
+#define _glthread_DECLARE_STATIC_COND(name) \
+// #warning Condition variables not implemented.
+
+#define _glthread_INIT_COND(cond) \
+ abort();
+
+#define _glthread_DESTROY_COND(name) \
+ abort();
+
+#define _glthread_COND_WAIT(cond, mutex) \
+ abort();
+
+#define _glthread_COND_SIGNAL(cond) \
+ abort();
+
+#define _glthread_COND_BROADCAST(cond) \
+ abort();
+
+#endif
+
+
+/*
+ * Solaris threads. Use only up to Solaris 2.4.
+ * Solaris 2.5 and higher provide POSIX threads.
+ * Be sure to compile with -mt on the Solaris compilers, or
+ * use -D_REENTRANT if using gcc.
+ */
+#ifdef SOLARIS_THREADS
+#include <thread.h>
+
+typedef struct {
+ thread_key_t key;
+ mutex_t keylock;
+ int initMagic;
+} _glthread_TSD;
+
+typedef thread_t _glthread_Thread;
+
+typedef mutex_t _glthread_Mutex;
+
+/* XXX need to really implement mutex-related macros */
+#define _glthread_DECLARE_STATIC_MUTEX(name) static _glthread_Mutex name = 0
+#define _glthread_INIT_MUTEX(name) (void) name
+#define _glthread_DESTROY_MUTEX(name) (void) name
+#define _glthread_LOCK_MUTEX(name) (void) name
+#define _glthread_UNLOCK_MUTEX(name) (void) name
+
+#endif /* SOLARIS_THREADS */
+
+
+
+
+/*
+ * Windows threads. Should work with Windows NT and 95.
+ * IMPORTANT: Link with multithreaded runtime library when THREADS are
+ * used!
+ */
+#ifdef WIN32_THREADS
+#include <windows.h>
+
+typedef struct {
+ DWORD key;
+ int initMagic;
+} _glthread_TSD;
+
+typedef HANDLE _glthread_Thread;
+
+typedef CRITICAL_SECTION _glthread_Mutex;
+
+#define _glthread_DECLARE_STATIC_MUTEX(name) /*static*/ _glthread_Mutex name = {0,0,0,0,0,0}
+#define _glthread_INIT_MUTEX(name) InitializeCriticalSection(&name)
+#define _glthread_DESTROY_MUTEX(name) DeleteCriticalSection(&name)
+#define _glthread_LOCK_MUTEX(name) EnterCriticalSection(&name)
+#define _glthread_UNLOCK_MUTEX(name) LeaveCriticalSection(&name)
+
+#endif /* WIN32_THREADS */
+
+
+
+
+/*
+ * XFree86 has its own thread wrapper, Xthreads.h
+ * We wrap it again for GL.
+ */
+#ifdef USE_XTHREADS
+#include <X11/Xthreads.h>
+
+typedef struct {
+ xthread_key_t key;
+ int initMagic;
+} _glthread_TSD;
+
+typedef xthread_t _glthread_Thread;
+
+typedef xmutex_rec _glthread_Mutex;
+
+#ifdef XMUTEX_INITIALIZER
+#define _glthread_DECLARE_STATIC_MUTEX(name) \
+ static _glthread_Mutex name = XMUTEX_INITIALIZER
+#else
+#define _glthread_DECLARE_STATIC_MUTEX(name) \
+ static _glthread_Mutex name
+#endif
+
+#define _glthread_INIT_MUTEX(name) \
+ xmutex_init(&(name))
+
+#define _glthread_DESTROY_MUTEX(name) \
+ xmutex_clear(&(name))
+
+#define _glthread_LOCK_MUTEX(name) \
+ (void) xmutex_lock(&(name))
+
+#define _glthread_UNLOCK_MUTEX(name) \
+ (void) xmutex_unlock(&(name))
+
+#endif /* USE_XTHREADS */
+
+
+
+/*
+ * BeOS threads. R5.x required.
+ */
+#ifdef BEOS_THREADS
+
+#include <kernel/OS.h>
+#include <support/TLS.h>
+
+typedef struct {
+ int32 key;
+ int initMagic;
+} _glthread_TSD;
+
+typedef thread_id _glthread_Thread;
+
+/* Use a benaphore, a.k.a. speedier semaphore */
+typedef struct {
+ int32 lock;
+ sem_id sem;
+} benaphore;
+typedef benaphore _glthread_Mutex;
+
+#define _glthread_DECLARE_STATIC_MUTEX(name) static _glthread_Mutex name = { 0, 0 }
+#define _glthread_INIT_MUTEX(name) name.sem = create_sem(0, #name"_benaphore"), name.lock = 0
+#define _glthread_DESTROY_MUTEX(name) delete_sem(name.sem), name.lock = 0
+#define _glthread_LOCK_MUTEX(name) if (name.sem == 0) _glthread_INIT_MUTEX(name); \
+ if (atomic_add(&(name.lock), 1) >= 1) acquire_sem(name.sem)
+#define _glthread_UNLOCK_MUTEX(name) if (atomic_add(&(name.lock), -1) > 1) release_sem(name.sem)
+
+#endif /* BEOS_THREADS */
+
+
+
+#ifndef THREADS
+
+/*
+ * THREADS not defined
+ */
+
+typedef GLuint _glthread_TSD;
+
+typedef GLuint _glthread_Thread;
+
+typedef GLuint _glthread_Mutex;
+
+#define _glthread_DECLARE_STATIC_MUTEX(name) static _glthread_Mutex name = 0
+
+#define _glthread_INIT_MUTEX(name) (void) name
+
+#define _glthread_DESTROY_MUTEX(name) (void) name
+
+#define _glthread_LOCK_MUTEX(name) (void) name
+
+#define _glthread_UNLOCK_MUTEX(name) (void) name
+
+#endif /* THREADS */
+
+
+
+/*
+ * Platform independent thread specific data API.
+ */
+
+extern unsigned long
+_glthread_GetID(void);
+
+
+extern void
+_glthread_InitTSD(_glthread_TSD *);
+
+
+extern void *
+_glthread_GetTSD(_glthread_TSD *);
+
+
+extern void
+_glthread_SetTSD(_glthread_TSD *, void *);
+
+#if defined(GLX_USE_TLS)
+
+extern __thread struct _glapi_table * _glapi_tls_Dispatch
+ __attribute__((tls_model("initial-exec")));
+
+#define GET_DISPATCH() _glapi_tls_Dispatch
+
+#elif !defined(GL_CALL)
+# if defined(THREADS)
+# define GET_DISPATCH() \
+ ((__builtin_expect( _glapi_Dispatch != NULL, 1 )) \
+ ? _glapi_Dispatch : _glapi_get_dispatch())
+# else
+# define GET_DISPATCH() _glapi_Dispatch
+# endif /* defined(THREADS) */
+#endif /* ndef GL_CALL */
+
+
+#endif /* GLTHREAD_H */
--- /dev/null
+
+#include "intel_be_batchbuffer.h"
+#include "intel_be_context.h"
+#include "intel_be_device.h"
+#include <errno.h>
+
+#include "xf86drm.h"
+
+static void
+intel_realloc_relocs(struct intel_be_batchbuffer *batch, int num_relocs)
+{
+ unsigned long size = num_relocs * I915_RELOC0_STRIDE + I915_RELOC_HEADER;
+
+ size *= sizeof(uint32_t);
+ batch->reloc = realloc(batch->reloc, size);
+ batch->reloc_size = num_relocs;
+}
+
+
+void
+intel_be_batchbuffer_reset(struct intel_be_batchbuffer *batch)
+{
+ /*
+ * Get a new, free batchbuffer.
+ */
+ drmBO *bo;
+ struct drm_bo_info_req *req;
+
+ driBOUnrefUserList(batch->list);
+ driBOResetList(batch->list);
+
+ /* base.size is the size available to the i915simple driver */
+ batch->base.size = batch->device->max_batch_size - BATCH_RESERVED;
+ batch->base.actual_size = batch->device->max_batch_size;
+ driBOData(batch->buffer, batch->base.actual_size, NULL, NULL, 0);
+
+ /*
+ * Add the batchbuffer to the validate list.
+ */
+
+ driBOAddListItem(batch->list, batch->buffer,
+ DRM_BO_FLAG_EXE | DRM_BO_FLAG_MEM_TT,
+ DRM_BO_FLAG_EXE | DRM_BO_MASK_MEM,
+ &batch->dest_location, &batch->node);
+
+ req = &batch->node->bo_arg.d.req.bo_req;
+
+ /*
+ * Set up information needed for us to make relocations
+ * relative to the underlying drm buffer objects.
+ */
+
+ driReadLockKernelBO();
+ bo = driBOKernel(batch->buffer);
+ req->presumed_offset = (uint64_t) bo->offset;
+ req->hint = DRM_BO_HINT_PRESUMED_OFFSET;
+ batch->drmBOVirtual = (uint8_t *) bo->virtual;
+ driReadUnlockKernelBO();
+
+ /*
+ * Adjust the relocation buffer size.
+ */
+
+ if (batch->reloc_size > INTEL_MAX_RELOCS ||
+ batch->reloc == NULL)
+ intel_realloc_relocs(batch, INTEL_DEFAULT_RELOCS);
+
+ assert(batch->reloc != NULL);
+ batch->reloc[0] = 0; /* No relocs yet. */
+ batch->reloc[1] = 1; /* Reloc type 1 */
+ batch->reloc[2] = 0; /* Only a single relocation list. */
+ batch->reloc[3] = 0; /* Only a single relocation list. */
+
+ batch->base.map = driBOMap(batch->buffer, DRM_BO_FLAG_WRITE, 0);
+ batch->poolOffset = driBOPoolOffset(batch->buffer);
+ batch->base.ptr = batch->base.map;
+ batch->dirty_state = ~0;
+ batch->nr_relocs = 0;
+ batch->flags = 0;
+ batch->id = 0;//batch->intel->intelScreen->batch_id++;
+}
+
+/*======================================================================
+ * Public functions
+ */
+struct intel_be_batchbuffer *
+intel_be_batchbuffer_alloc(struct intel_be_context *intel)
+{
+ struct intel_be_batchbuffer *batch = calloc(sizeof(*batch), 1);
+
+ batch->intel = intel;
+ batch->device = intel->device;
+
+ driGenBuffers(intel->device->batchPool, "batchbuffer", 1,
+ &batch->buffer, 4096,
+ DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE, 0);
+ batch->last_fence = NULL;
+ batch->list = driBOCreateList(20);
+ batch->reloc = NULL;
+ intel_be_batchbuffer_reset(batch);
+ return batch;
+}
+
+void
+intel_be_batchbuffer_free(struct intel_be_batchbuffer *batch)
+{
+ if (batch->last_fence) {
+ driFenceFinish(batch->last_fence,
+ DRM_FENCE_TYPE_EXE, FALSE);
+ driFenceUnReference(&batch->last_fence);
+ }
+ if (batch->base.map) {
+ driBOUnmap(batch->buffer);
+ batch->base.map = NULL;
+ }
+ driBOUnReference(batch->buffer);
+ driBOFreeList(batch->list);
+ if (batch->reloc)
+ free(batch->reloc);
+ batch->buffer = NULL;
+ free(batch);
+}
+
+void
+intel_be_offset_relocation(struct intel_be_batchbuffer *batch,
+ unsigned pre_add,
+ struct _DriBufferObject *driBO,
+ uint64_t val_flags,
+ uint64_t val_mask)
+{
+ int itemLoc;
+ struct _drmBONode *node;
+ uint32_t *reloc;
+ struct drm_bo_info_req *req;
+
+ driBOAddListItem(batch->list, driBO, val_flags, val_mask,
+ &itemLoc, &node);
+ req = &node->bo_arg.d.req.bo_req;
+
+ if (!(req->hint & DRM_BO_HINT_PRESUMED_OFFSET)) {
+
+ /*
+ * Stop other threads from tampering with the underlying
+ * drmBO while we're reading its offset.
+ */
+
+ driReadLockKernelBO();
+ req->presumed_offset = (uint64_t) driBOKernel(driBO)->offset;
+ driReadUnlockKernelBO();
+ req->hint = DRM_BO_HINT_PRESUMED_OFFSET;
+ }
+
+ pre_add += driBOPoolOffset(driBO);
+
+ if (batch->nr_relocs == batch->reloc_size)
+ intel_realloc_relocs(batch, batch->reloc_size * 2);
+
+ reloc = batch->reloc +
+ (I915_RELOC_HEADER + batch->nr_relocs * I915_RELOC0_STRIDE);
+
+ reloc[0] = ((uint8_t *)batch->base.ptr - batch->drmBOVirtual);
+ i915_batchbuffer_dword(&batch->base, req->presumed_offset + pre_add);
+ reloc[1] = pre_add;
+ reloc[2] = itemLoc;
+ reloc[3] = batch->dest_location;
+ batch->nr_relocs++;
+}
+
+static void
+i915_drm_copy_reply(const struct drm_bo_info_rep * rep, drmBO * buf)
+{
+ buf->handle = rep->handle;
+ buf->flags = rep->flags;
+ buf->size = rep->size;
+ buf->offset = rep->offset;
+ buf->mapHandle = rep->arg_handle;
+ buf->proposedFlags = rep->proposed_flags;
+ buf->start = rep->buffer_start;
+ buf->fenceFlags = rep->fence_flags;
+ buf->replyFlags = rep->rep_flags;
+ buf->pageAlignment = rep->page_alignment;
+}
+
+static int
+i915_execbuf(struct intel_be_batchbuffer *batch,
+ unsigned int used,
+ boolean ignore_cliprects,
+ drmBOList *list,
+ struct drm_i915_execbuffer *ea)
+{
+// struct intel_be_context *intel = batch->intel;
+ drmBONode *node;
+ drmMMListHead *l;
+ struct drm_i915_op_arg *arg, *first;
+ struct drm_bo_op_req *req;
+ struct drm_bo_info_rep *rep;
+ uint64_t *prevNext = NULL;
+ drmBO *buf;
+ int ret = 0;
+ uint32_t count = 0;
+
+ first = NULL;
+ for (l = list->list.next; l != &list->list; l = l->next) {
+ node = DRMLISTENTRY(drmBONode, l, head);
+
+ arg = &node->bo_arg;
+ req = &arg->d.req;
+
+ if (!first)
+ first = arg;
+
+ if (prevNext)
+ *prevNext = (unsigned long)arg;
+
+ prevNext = &arg->next;
+ req->bo_req.handle = node->buf->handle;
+ req->op = drm_bo_validate;
+ req->bo_req.flags = node->arg0;
+ req->bo_req.mask = node->arg1;
+ req->bo_req.hint |= 0;
+ count++;
+ }
+
+ memset(ea, 0, sizeof(*ea));
+ ea->num_buffers = count;
+ ea->batch.start = batch->poolOffset;
+ ea->batch.used = used;
+#if 0 /* ZZZ JB: no cliprects used */
+ ea->batch.cliprects = intel->pClipRects;
+ ea->batch.num_cliprects = ignore_cliprects ? 0 : intel->numClipRects;
+ ea->batch.DR1 = 0;
+ ea->batch.DR4 = ((((GLuint) intel->drawX) & 0xffff) |
+ (((GLuint) intel->drawY) << 16));
+#else
+ ea->batch.cliprects = NULL;
+ ea->batch.num_cliprects = 0;
+ ea->batch.DR1 = 0;
+ ea->batch.DR4 = 0;
+#endif
+ ea->fence_arg.flags = DRM_I915_FENCE_FLAG_FLUSHED;
+ ea->ops_list = (unsigned long) first;
+ first->reloc_ptr = (unsigned long) batch->reloc;
+ batch->reloc[0] = batch->nr_relocs;
+
+ //return -EFAULT;
+ do {
+ ret = drmCommandWriteRead(batch->device->fd, DRM_I915_EXECBUFFER, ea,
+ sizeof(*ea));
+ } while (ret == -EAGAIN);
+
+ if (ret != 0)
+ return ret;
+
+ for (l = list->list.next; l != &list->list; l = l->next) {
+ node = DRMLISTENTRY(drmBONode, l, head);
+ arg = &node->bo_arg;
+ rep = &arg->d.rep.bo_info;
+
+ if (!arg->handled) {
+ return -EFAULT;
+ }
+ if (arg->d.rep.ret)
+ return arg->d.rep.ret;
+
+ buf = node->buf;
+ i915_drm_copy_reply(rep, buf);
+ }
+ return 0;
+}
+
+/* TODO: Push this whole function into bufmgr.
+ */
+static struct _DriFenceObject *
+do_flush_locked(struct intel_be_batchbuffer *batch,
+ unsigned int used,
+ boolean ignore_cliprects, boolean allow_unlock)
+{
+ struct intel_be_context *intel = batch->intel;
+ struct _DriFenceObject *fo;
+ drmFence fence;
+ drmBOList *boList;
+ struct drm_i915_execbuffer ea;
+ int ret = 0;
+
+ driBOValidateUserList(batch->list);
+ boList = driGetdrmBOList(batch->list);
+
+#if 0 /* ZZZ JB: Always run */
+ if (!(intel->numClipRects == 0 && !ignore_cliprects)) {
+#else
+ if (1) {
+#endif
+ ret = i915_execbuf(batch, used, ignore_cliprects, boList, &ea);
+ } else {
+ driPutdrmBOList(batch->list);
+ fo = NULL;
+ goto out;
+ }
+ driPutdrmBOList(batch->list);
+ if (ret)
+ abort();
+
+ if (ea.fence_arg.error != 0) {
+
+ /*
+ * The hardware has been idled by the kernel.
+ * Don't fence the driBOs.
+ */
+
+ if (batch->last_fence)
+ driFenceUnReference(&batch->last_fence);
+#if 0 /* ZZZ JB: no _mesa_* funcs in gallium */
+ _mesa_printf("fence error\n");
+#endif
+ batch->last_fence = NULL;
+ fo = NULL;
+ goto out;
+ }
+
+ fence.handle = ea.fence_arg.handle;
+ fence.fence_class = ea.fence_arg.fence_class;
+ fence.type = ea.fence_arg.type;
+ fence.flags = ea.fence_arg.flags;
+ fence.signaled = ea.fence_arg.signaled;
+
+ fo = driBOFenceUserList(batch->device->fenceMgr, batch->list,
+ "SuperFence", &fence);
+
+ if (driFenceType(fo) & DRM_I915_FENCE_TYPE_RW) {
+ if (batch->last_fence)
+ driFenceUnReference(&batch->last_fence);
+ /*
+ * FIXME: Context last fence??
+ */
+ batch->last_fence = fo;
+ driFenceReference(fo);
+ }
+ out:
+#if 0 /* ZZZ JB: fix this */
+ intel->vtbl.lost_hardware(intel);
+#else
+ (void)intel;
+#endif
+ return fo;
+}
+
+
+struct _DriFenceObject *
+intel_be_batchbuffer_flush(struct intel_be_batchbuffer *batch)
+{
+ struct intel_be_context *intel = batch->intel;
+ unsigned int used = batch->base.ptr - batch->base.map;
+ boolean was_locked = batch->intel->hardware_locked(intel);
+ struct _DriFenceObject *fence;
+
+ if (used == 0) {
+ driFenceReference(batch->last_fence);
+ return batch->last_fence;
+ }
+
+ /* Add the MI_BATCH_BUFFER_END. We always add an MI_FLUSH as well, which
+ * is a performance drain that we would like to avoid.
+ */
+#if 0 /* ZZZ JB: what should we do here? */
+ if (used & 4) {
+ ((int *) batch->base.ptr)[0] = intel->vtbl.flush_cmd();
+ ((int *) batch->base.ptr)[1] = 0;
+ ((int *) batch->base.ptr)[2] = MI_BATCH_BUFFER_END;
+ used += 12;
+ }
+ else {
+ ((int *) batch->base.ptr)[0] = intel->vtbl.flush_cmd();
+ ((int *) batch->base.ptr)[1] = MI_BATCH_BUFFER_END;
+ used += 8;
+ }
+#else
+ if (used & 4) {
+ ((int *) batch->base.ptr)[0] = ((0<<29)|(4<<23)); // MI_FLUSH;
+ ((int *) batch->base.ptr)[1] = 0;
+ ((int *) batch->base.ptr)[2] = (0xA<<23); // MI_BATCH_BUFFER_END;
+ used += 12;
+ }
+ else {
+ ((int *) batch->base.ptr)[0] = ((0<<29)|(4<<23)); // MI_FLUSH;
+ ((int *) batch->base.ptr)[1] = (0xA<<23); // MI_BATCH_BUFFER_END;
+ used += 8;
+ }
+#endif
+ driBOUnmap(batch->buffer);
+ batch->base.ptr = NULL;
+ batch->base.map = NULL;
+
+ /* TODO: Just pass the relocation list and dma buffer up to the
+ * kernel.
+ */
+ if (!was_locked)
+ intel->hardware_lock(intel);
+
+ fence = do_flush_locked(batch, used, !(batch->flags & INTEL_BATCH_CLIPRECTS),
+ FALSE);
+
+ if (!was_locked)
+ intel->hardware_unlock(intel);
+
+ /* Reset the buffer:
+ */
+ intel_be_batchbuffer_reset(batch);
+ return fence;
+}
+
+void
+intel_be_batchbuffer_finish(struct intel_be_batchbuffer *batch)
+{
+ struct _DriFenceObject *fence = intel_be_batchbuffer_flush(batch);
+ driFenceFinish(fence, driFenceType(fence), FALSE);
+ driFenceUnReference(&fence);
+}
+
+#if 0
+void
+intel_be_batchbuffer_data(struct intel_be_batchbuffer *batch,
+ const void *data, unsigned int bytes, unsigned int flags)
+{
+ assert((bytes & 3) == 0);
+ intel_batchbuffer_require_space(batch, bytes, flags);
+ memcpy(batch->base.ptr, data, bytes);
+ batch->base.ptr += bytes;
+}
+#endif
--- /dev/null
+
+#ifndef INTEL_BE_BATCHBUFFER_H
+#define INTEL_BE_BATCHBUFFER_H
+
+#include "i915simple/i915_batch.h"
+
+#include "ws_dri_bufmgr.h"
+
+#define BATCH_RESERVED 16
+
+#define INTEL_DEFAULT_RELOCS 100
+#define INTEL_MAX_RELOCS 400
+
+#define INTEL_BATCH_NO_CLIPRECTS 0x1
+#define INTEL_BATCH_CLIPRECTS 0x2
+
+struct intel_be_context;
+struct intel_be_device;
+
+struct intel_be_batchbuffer
+{
+ struct i915_batchbuffer base;
+
+ struct intel_be_context *intel;
+ struct intel_be_device *device;
+
+ struct _DriBufferObject *buffer;
+ struct _DriFenceObject *last_fence;
+ uint32_t flags;
+
+ struct _DriBufferList *list;
+ size_t list_count;
+
+ uint32_t *reloc;
+ size_t reloc_size;
+ size_t nr_relocs;
+
+ uint32_t dirty_state;
+ uint32_t id;
+
+ uint32_t poolOffset;
+ uint8_t *drmBOVirtual;
+ struct _drmBONode *node; /* Validation list node for this buffer */
+ int dest_location; /* Validation list sequence for this buffer */
+};
+
+struct intel_be_batchbuffer *
+intel_be_batchbuffer_alloc(struct intel_be_context *intel);
+
+void
+intel_be_batchbuffer_free(struct intel_be_batchbuffer *batch);
+
+void
+intel_be_batchbuffer_finish(struct intel_be_batchbuffer *batch);
+
+struct _DriFenceObject *
+intel_be_batchbuffer_flush(struct intel_be_batchbuffer *batch);
+
+void
+intel_be_batchbuffer_reset(struct intel_be_batchbuffer *batch);
+
+void
+intel_be_offset_relocation(struct intel_be_batchbuffer *batch,
+ unsigned pre_add,
+ struct _DriBufferObject *driBO,
+ uint64_t val_flags,
+ uint64_t val_mask);
+
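+/*
+ * Typical flow, as an illustrative sketch only ("bb" and "target_bo"
+ * are hypothetical names, not part of this interface):
+ *
+ *    struct intel_be_batchbuffer *bb = intel_be_batchbuffer_alloc(intel);
+ *
+ *    // Emit commands into bb->base via the i915simple helpers; for every
+ *    // buffer address in the command stream, emit it through a relocation:
+ *    intel_be_offset_relocation(bb, 0, target_bo,
+ *                               DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
+ *                               DRM_BO_MASK_MEM | DRM_BO_FLAG_READ);
+ *
+ *    // Submit the batch; drop the fence if we don't need to wait on it:
+ *    struct _DriFenceObject *fence = intel_be_batchbuffer_flush(bb);
+ *    driFenceUnReference(&fence);
+ *
+ *    intel_be_batchbuffer_free(bb);
+ */
+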
+#endif
--- /dev/null
+
+/*
+ * Authors: Jakob Bornecrantz <jakob-at-tungstengraphics.com>
+ */
+
+#include "ws_dri_fencemgr.h"
+#include "intel_be_device.h"
+#include "intel_be_context.h"
+#include "intel_be_batchbuffer.h"
+
+static INLINE struct intel_be_context *
+intel_be_context(struct i915_winsys *sws)
+{
+ return (struct intel_be_context *)sws;
+}
+
+/* Simple batchbuffer interface:
+ */
+
+static struct i915_batchbuffer*
+intel_i915_batch_get(struct i915_winsys *sws)
+{
+ struct intel_be_context *intel = intel_be_context(sws);
+ return &intel->batch->base;
+}
+
+static void intel_i915_batch_reloc(struct i915_winsys *sws,
+ struct pipe_buffer *buf,
+ unsigned access_flags,
+ unsigned delta)
+{
+ struct intel_be_context *intel = intel_be_context(sws);
+
+ unsigned flags = DRM_BO_FLAG_MEM_TT;
+ unsigned mask = DRM_BO_MASK_MEM;
+
+ if (access_flags & I915_BUFFER_ACCESS_WRITE) {
+ flags |= DRM_BO_FLAG_WRITE;
+ mask |= DRM_BO_FLAG_WRITE;
+ }
+
+ if (access_flags & I915_BUFFER_ACCESS_READ) {
+ flags |= DRM_BO_FLAG_READ;
+ mask |= DRM_BO_FLAG_READ;
+ }
+
+ intel_be_offset_relocation(intel->batch,
+ delta,
+ dri_bo(buf),
+ flags,
+ mask);
+}
+
+static void intel_i915_batch_flush(struct i915_winsys *sws,
+ struct pipe_fence_handle **fence)
+{
+ struct intel_be_context *intel = intel_be_context(sws);
+
+ union {
+ struct _DriFenceObject *dri;
+ struct pipe_fence_handle *pipe;
+ } fu;
+
+ if (fence)
+ assert(!*fence);
+
+ fu.dri = intel_be_batchbuffer_flush(intel->batch);
+
+ if (!fu.dri) {
+ assert(0);
+ if (fence)
+ *fence = NULL;
+ return;
+ }
+
+ if (fu.dri) {
+ if (fence)
+ *fence = fu.pipe;
+ else
+ driFenceUnReference(&fu.dri);
+ }
+
+}
+
+boolean
+intel_be_init_context(struct intel_be_context *intel, struct intel_be_device *device)
+{
+ assert(intel);
+ assert(device);
+
+ intel->device = device;
+
+ /* TODO: move framebuffer creation to the driver */
+
+ intel->base.batch_get = intel_i915_batch_get;
+ intel->base.batch_reloc = intel_i915_batch_reloc;
+ intel->base.batch_flush = intel_i915_batch_flush;
+
+ intel->batch = intel_be_batchbuffer_alloc(intel);
+
+ return true;
+}
+
+void
+intel_be_destroy_context(struct intel_be_context *intel)
+{
+ intel_be_batchbuffer_free(intel->batch);
+}
--- /dev/null
+/* These need to be different from the intel winsys */
+#ifndef INTEL_BE_CONTEXT_H
+#define INTEL_BE_CONTEXT_H
+
+#include "i915simple/i915_winsys.h"
+
+struct intel_be_context
+{
+ /** Interface to i915simple driver */
+ struct i915_winsys base;
+
+ struct intel_be_device *device;
+ struct intel_be_batchbuffer *batch;
+
+ /*
+ * Hardware lock functions.
+ *
+ * Needs to be filled in by the winsys.
+ */
+ void (*hardware_lock)(struct intel_be_context *context);
+ void (*hardware_unlock)(struct intel_be_context *context);
+ boolean (*hardware_locked)(struct intel_be_context *context);
+};
+
+/**
+ * Initialize an allocated intel_be_context struct.
+ *
+ * Remember to set the hardware_* functions.
+ */
+boolean
+intel_be_init_context(struct intel_be_context *intel,
+ struct intel_be_device *device);
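+
+/*
+ * Minimal usage sketch, assuming the winsys wraps the context in its own
+ * struct ("my_context" and the no-op lock callbacks are hypothetical,
+ * not part of this interface):
+ *
+ *    struct my_context {
+ *       struct intel_be_context base;
+ *       // ...winsys specific state
+ *    };
+ *
+ *    static void hw_lock(struct intel_be_context *c) { }
+ *    static void hw_unlock(struct intel_be_context *c) { }
+ *    static boolean hw_locked(struct intel_be_context *c) { return FALSE; }
+ *
+ *    struct my_context *ctx = calloc(1, sizeof(*ctx));
+ *    ctx->base.hardware_lock = hw_lock;
+ *    ctx->base.hardware_unlock = hw_unlock;
+ *    ctx->base.hardware_locked = hw_locked;
+ *    intel_be_init_context(&ctx->base, dev);  // dev is a struct intel_be_device *
+ *    ...
+ *    intel_be_destroy_context(&ctx->base);
+ *    free(ctx);
+ */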
+
+/**
+ * Destroy an intel_be_context.
+ * Does not free the struct itself; that is up to the winsys.
+ */
+void
+intel_be_destroy_context(struct intel_be_context *intel);
+#endif
--- /dev/null
+
+
+/*
+ * Authors: Keith Whitwell <keithw-at-tungstengraphics-dot-com>
+ * Jakob Bornecrantz <jakob-at-tungstengraphics-dot-com>
+ */
+
+#include "intel_be_device.h"
+#include "ws_dri_bufmgr.h"
+#include "ws_dri_bufpool.h"
+#include "ws_dri_fencemgr.h"
+
+#include "pipe/p_winsys.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+#include "pipe/p_util.h"
+#include "pipe/p_inlines.h"
+
+/* Turn a pipe winsys into an intel/pipe winsys:
+ */
+static INLINE struct intel_be_device *
+intel_be_device( struct pipe_winsys *winsys )
+{
+ return (struct intel_be_device *)winsys;
+}
+
+
+/*
+ * Buffer functions.
+ *
+ * Most callbacks map directly onto dri_bufmgr operations:
+ */
+
+static void *intel_be_buffer_map(struct pipe_winsys *winsys,
+ struct pipe_buffer *buf,
+ unsigned flags )
+{
+ unsigned drm_flags = 0;
+
+ if (flags & PIPE_BUFFER_USAGE_CPU_WRITE)
+ drm_flags |= DRM_BO_FLAG_WRITE;
+
+ if (flags & PIPE_BUFFER_USAGE_CPU_READ)
+ drm_flags |= DRM_BO_FLAG_READ;
+
+ return driBOMap( dri_bo(buf), drm_flags, 0 );
+}
+
+static void intel_be_buffer_unmap(struct pipe_winsys *winsys,
+ struct pipe_buffer *buf)
+{
+ driBOUnmap( dri_bo(buf) );
+}
+
+static void
+intel_be_buffer_destroy(struct pipe_winsys *winsys,
+ struct pipe_buffer *buf)
+{
+ driBOUnReference( dri_bo(buf) );
+ FREE(buf);
+}
+
+static struct pipe_buffer *
+intel_be_buffer_create(struct pipe_winsys *winsys,
+ unsigned alignment,
+ unsigned usage,
+ unsigned size )
+{
+ struct intel_be_buffer *buffer = CALLOC_STRUCT( intel_be_buffer );
+ struct intel_be_device *iws = intel_be_device(winsys);
+ unsigned flags = 0;
+ struct _DriBufferPool *pool;
+
+ buffer->base.refcount = 1;
+ buffer->base.alignment = alignment;
+ buffer->base.usage = usage;
+ buffer->base.size = size;
+
+ if (usage & (PIPE_BUFFER_USAGE_VERTEX | PIPE_BUFFER_USAGE_CONSTANT)) {
+ flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
+ pool = iws->mallocPool;
+ } else if (usage & PIPE_BUFFER_USAGE_CUSTOM) {
+ /* For vertex buffers */
+ flags |= DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_MEM_TT;
+ pool = iws->vertexPool;
+ } else {
+ flags |= DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_MEM_TT;
+ pool = iws->regionPool;
+ }
+
+ if (usage & PIPE_BUFFER_USAGE_GPU_READ)
+ flags |= DRM_BO_FLAG_READ;
+
+ if (usage & PIPE_BUFFER_USAGE_GPU_WRITE)
+ flags |= DRM_BO_FLAG_WRITE;
+
+ /* drm complains if we don't set any read/write flags.
+ */
+ if ((flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) == 0)
+ flags |= DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE;
+
+ buffer->pool = pool;
+ driGenBuffers( buffer->pool,
+ "pipe buffer", 1, &buffer->driBO, alignment, flags, 0 );
+
+ driBOData( buffer->driBO, size, NULL, buffer->pool, 0 );
+
+ return &buffer->base;
+}
+
+
+static struct pipe_buffer *
+intel_be_user_buffer_create(struct pipe_winsys *winsys, void *ptr, unsigned bytes)
+{
+ struct intel_be_buffer *buffer = CALLOC_STRUCT( intel_be_buffer );
+ struct intel_be_device *iws = intel_be_device(winsys);
+
+ driGenUserBuffer( iws->regionPool,
+ "pipe user buffer", &buffer->driBO, ptr, bytes );
+
+ buffer->base.refcount = 1;
+
+ return &buffer->base;
+}
+
+
+/*
+ * Surface functions.
+ *
+ * Deprecated!
+ */
+
+static struct pipe_surface *
+intel_i915_surface_alloc(struct pipe_winsys *winsys)
+{
+ assert((size_t)"intel_i915_surface_alloc is deprecated" & 0);
+ return NULL;
+}
+
+static int
+intel_i915_surface_alloc_storage(struct pipe_winsys *winsys,
+ struct pipe_surface *surf,
+ unsigned width, unsigned height,
+ enum pipe_format format,
+ unsigned flags,
+ unsigned tex_usage)
+{
+ assert((size_t)"intel_i915_surface_alloc_storage is deprecated" & 0);
+ return -1;
+}
+
+static void
+intel_i915_surface_release(struct pipe_winsys *winsys, struct pipe_surface **s)
+{
+ assert((size_t)"intel_i915_surface_release is deprecated" & 0);
+}
+
+/*
+ * Fence functions
+ */
+
+static void
+intel_be_fence_reference( struct pipe_winsys *sws,
+ struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *fence )
+{
+ if (*ptr)
+ driFenceUnReference((struct _DriFenceObject **)ptr);
+
+ if (fence)
+ *ptr = (struct pipe_fence_handle *)driFenceReference((struct _DriFenceObject *)fence);
+}
+
+static int
+intel_be_fence_signalled( struct pipe_winsys *sws,
+ struct pipe_fence_handle *fence,
+ unsigned flag )
+{
+ return driFenceSignaled((struct _DriFenceObject *)fence, flag);
+}
+
+static int
+intel_be_fence_finish( struct pipe_winsys *sws,
+ struct pipe_fence_handle *fence,
+ unsigned flag )
+{
+ return driFenceFinish((struct _DriFenceObject *)fence, flag, 0);
+}
+
+/*
+ * Misc functions
+ */
+
+boolean
+intel_be_init_device(struct intel_be_device *dev, int fd)
+{
+ dev->fd = fd;
+ dev->max_batch_size = 16 * 4096;
+ dev->max_vertex_size = 128 * 4096;
+
+ dev->base.buffer_create = intel_be_buffer_create;
+ dev->base.user_buffer_create = intel_be_user_buffer_create;
+ dev->base.buffer_map = intel_be_buffer_map;
+ dev->base.buffer_unmap = intel_be_buffer_unmap;
+ dev->base.buffer_destroy = intel_be_buffer_destroy;
+ dev->base.surface_alloc = intel_i915_surface_alloc;
+ dev->base.surface_alloc_storage = intel_i915_surface_alloc_storage;
+ dev->base.surface_release = intel_i915_surface_release;
+ dev->base.fence_reference = intel_be_fence_reference;
+ dev->base.fence_signalled = intel_be_fence_signalled;
+ dev->base.fence_finish = intel_be_fence_finish;
+
+#if 0 /* Set by the winsys */
+ dev->base.flush_frontbuffer = intel_flush_frontbuffer;
+ dev->base.get_name = intel_get_name;
+#endif
+
+ dev->fMan = driInitFreeSlabManager(10, 10);
+ dev->fenceMgr = driFenceMgrTTMInit(dev->fd);
+
+ dev->mallocPool = driMallocPoolInit();
+ dev->staticPool = driDRMPoolInit(dev->fd);
+ dev->regionPool = driDRMPoolInit(dev->fd);
+ dev->vertexPool = driSlabPoolInit(dev->fd,
+ DRM_BO_FLAG_READ |
+ DRM_BO_FLAG_WRITE |
+ DRM_BO_FLAG_MEM_TT,
+ DRM_BO_FLAG_READ |
+ DRM_BO_FLAG_WRITE |
+ DRM_BO_FLAG_MEM_TT,
+ dev->max_vertex_size,
+ 1, 120, dev->max_vertex_size * 4, 0,
+ dev->fMan);
+
+ dev->batchPool = driSlabPoolInit(dev->fd,
+ DRM_BO_FLAG_EXE |
+ DRM_BO_FLAG_MEM_TT,
+ DRM_BO_FLAG_EXE |
+ DRM_BO_FLAG_MEM_TT,
+ dev->max_batch_size,
+ 1, 40, dev->max_batch_size * 16, 0,
+ dev->fMan);
+
+ return true;
+}
+
+void
+intel_be_destroy_device(struct intel_be_device *dev)
+{
+ driPoolTakeDown(dev->mallocPool);
+ driPoolTakeDown(dev->staticPool);
+ driPoolTakeDown(dev->regionPool);
+ driPoolTakeDown(dev->vertexPool);
+ driPoolTakeDown(dev->batchPool);
+
+ /** TODO: take down fenceMgr and fMan */
+}
--- /dev/null
+#ifndef INTEL_DRM_DEVICE_H
+#define INTEL_DRM_DEVICE_H
+
+#include "pipe/p_winsys.h"
+#include "pipe/p_context.h"
+
+/*
+ * Device
+ */
+
+struct intel_be_device
+{
+ struct pipe_winsys base;
+
+ int fd; /**< DRM file descriptor */
+
+ size_t max_batch_size;
+ size_t max_vertex_size;
+
+ struct _DriFenceMgr *fenceMgr;
+
+ struct _DriBufferPool *batchPool;
+ struct _DriBufferPool *regionPool;
+ struct _DriBufferPool *mallocPool;
+ struct _DriBufferPool *vertexPool;
+ struct _DriBufferPool *staticPool;
+ struct _DriFreeSlabManager *fMan;
+};
+
+boolean
+intel_be_init_device(struct intel_be_device *device, int fd);
+
+void
+intel_be_destroy_device(struct intel_be_device *dev);
+
+/*
+ * Buffer
+ */
+
+struct intel_be_buffer {
+ struct pipe_buffer base;
+ struct _DriBufferPool *pool;
+ struct _DriBufferObject *driBO;
+};
+
+static INLINE struct intel_be_buffer *
+intel_be_buffer( struct pipe_buffer *buf )
+{
+ return (struct intel_be_buffer *)buf;
+}
+
+static INLINE struct _DriBufferObject *
+dri_bo( struct pipe_buffer *buf )
+{
+ return intel_be_buffer(buf)->driBO;
+}
+
+#endif
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
+ */
+
+#include <xf86drm.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "glthread.h"
+#include "errno.h"
+#include "ws_dri_bufmgr.h"
+#include "string.h"
+#include "pipe/p_debug.h"
+#include "ws_dri_bufpool.h"
+#include "ws_dri_fencemgr.h"
+
+
+/*
+ * This lock is here to protect drmBO structs from changing underneath us during
+ * a validate list call, since the validate list cannot take individual locks for
+ * each drmBO. The validate list takes this lock in write mode. Any access to an
+ * individual drmBO should take this lock in read mode, since in that case the
+ * driBufferObject mutex will protect the access. Locking order is
+ * driBufferObject mutex -> this rw lock.
+ */
+
+_glthread_DECLARE_STATIC_MUTEX(bmMutex);
+_glthread_DECLARE_STATIC_COND(bmCond);
+
+static int kernelReaders = 0;
+static int num_buffers = 0;
+static int num_user_buffers = 0;
+
+static drmBO *drmBOListBuf(void *iterator)
+{
+ drmBONode *node;
+ drmMMListHead *l = (drmMMListHead *) iterator;
+ node = DRMLISTENTRY(drmBONode, l, head);
+ return node->buf;
+}
+
+static void *drmBOListIterator(drmBOList *list)
+{
+ void *ret = list->list.next;
+
+ if (ret == &list->list)
+ return NULL;
+ return ret;
+}
+
+static void *drmBOListNext(drmBOList *list, void *iterator)
+{
+ void *ret;
+
+ drmMMListHead *l = (drmMMListHead *) iterator;
+ ret = l->next;
+ if (ret == &list->list)
+ return NULL;
+ return ret;
+}
+
+static drmBONode *drmAddListItem(drmBOList *list, drmBO *item,
+ uint64_t arg0,
+ uint64_t arg1)
+{
+ drmBONode *node;
+ drmMMListHead *l;
+
+ l = list->free.next;
+ if (l == &list->free) {
+ node = (drmBONode *) malloc(sizeof(*node));
+ if (!node) {
+ return NULL;
+ }
+ list->numCurrent++;
+ }
+ else {
+ DRMLISTDEL(l);
+ node = DRMLISTENTRY(drmBONode, l, head);
+ }
+ node->buf = item;
+ node->arg0 = arg0;
+ node->arg1 = arg1;
+ DRMLISTADD(&node->head, &list->list);
+ list->numOnList++;
+ return node;
+}
+
+static int drmAddValidateItem(drmBOList *list, drmBO *buf, uint64_t flags,
+ uint64_t mask, int *newItem)
+{
+ drmBONode *node, *cur;
+ drmMMListHead *l;
+
+ *newItem = 0;
+ cur = NULL;
+
+ for (l = list->list.next; l != &list->list; l = l->next) {
+ node = DRMLISTENTRY(drmBONode, l, head);
+ if (node->buf == buf) {
+ cur = node;
+ break;
+ }
+ }
+ if (!cur) {
+ cur = drmAddListItem(list, buf, flags, mask);
+ if (!cur) {
+ return -ENOMEM;
+ }
+ *newItem = 1;
+ cur->arg0 = flags;
+ cur->arg1 = mask;
+ }
+ else {
+ uint64_t memFlags = cur->arg0 & flags & DRM_BO_MASK_MEM;
+ uint64_t accFlags = (cur->arg0 | flags) & ~DRM_BO_MASK_MEM;
+
+ if (mask & cur->arg1 & ~DRM_BO_MASK_MEM & (cur->arg0 ^ flags)) {
+ return -EINVAL;
+ }
+
+ cur->arg1 |= mask;
+ cur->arg0 = (cur->arg0 & ~mask) | ((memFlags | accFlags) & mask);
+
+ if (((cur->arg1 & DRM_BO_MASK_MEM) != 0) &&
+ (cur->arg0 & DRM_BO_MASK_MEM) == 0) {
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static void drmBOFreeList(drmBOList *list)
+{
+ drmBONode *node;
+ drmMMListHead *l;
+
+ l = list->list.next;
+ while(l != &list->list) {
+ DRMLISTDEL(l);
+ node = DRMLISTENTRY(drmBONode, l, head);
+ free(node);
+ l = list->list.next;
+ list->numCurrent--;
+ list->numOnList--;
+ }
+
+ l = list->free.next;
+ while(l != &list->free) {
+ DRMLISTDEL(l);
+ node = DRMLISTENTRY(drmBONode, l, head);
+ free(node);
+ l = list->free.next;
+ list->numCurrent--;
+ }
+}
+
+static int drmAdjustListNodes(drmBOList *list)
+{
+ drmBONode *node;
+ drmMMListHead *l;
+ int ret = 0;
+
+ while(list->numCurrent < list->numTarget) {
+ node = (drmBONode *) malloc(sizeof(*node));
+ if (!node) {
+ ret = -ENOMEM;
+ break;
+ }
+ list->numCurrent++;
+ DRMLISTADD(&node->head, &list->free);
+ }
+
+ while(list->numCurrent > list->numTarget) {
+ l = list->free.next;
+ if (l == &list->free)
+ break;
+ DRMLISTDEL(l);
+ node = DRMLISTENTRY(drmBONode, l, head);
+ free(node);
+ list->numCurrent--;
+ }
+ return ret;
+}
+
+static int drmBOCreateList(int numTarget, drmBOList *list)
+{
+ DRMINITLISTHEAD(&list->list);
+ DRMINITLISTHEAD(&list->free);
+ list->numTarget = numTarget;
+ list->numCurrent = 0;
+ list->numOnList = 0;
+ return drmAdjustListNodes(list);
+}
+
+static int drmBOResetList(drmBOList *list)
+{
+ drmMMListHead *l;
+ int ret;
+
+ ret = drmAdjustListNodes(list);
+ if (ret)
+ return ret;
+
+ l = list->list.next;
+ while (l != &list->list) {
+ DRMLISTDEL(l);
+ DRMLISTADD(l, &list->free);
+ list->numOnList--;
+ l = list->list.next;
+ }
+ return drmAdjustListNodes(list);
+}
+
+void driWriteLockKernelBO(void)
+{
+ _glthread_LOCK_MUTEX(bmMutex);
+ while(kernelReaders != 0)
+ _glthread_COND_WAIT(bmCond, bmMutex);
+}
+
+void driWriteUnlockKernelBO(void)
+{
+ _glthread_UNLOCK_MUTEX(bmMutex);
+}
+
+void driReadLockKernelBO(void)
+{
+ _glthread_LOCK_MUTEX(bmMutex);
+ kernelReaders++;
+ _glthread_UNLOCK_MUTEX(bmMutex);
+}
+
+void driReadUnlockKernelBO(void)
+{
+ _glthread_LOCK_MUTEX(bmMutex);
+ if (--kernelReaders == 0)
+ _glthread_COND_BROADCAST(bmCond);
+ _glthread_UNLOCK_MUTEX(bmMutex);
+}
+
+
+
+
+/*
+ * TODO: Introduce fence pools in the same way as
+ * buffer object pools.
+ */
+
+typedef struct _DriBufferObject
+{
+ DriBufferPool *pool;
+ _glthread_Mutex mutex;
+ int refCount;
+ const char *name;
+ uint64_t flags;
+ unsigned hint;
+ unsigned alignment;
+ unsigned createdByReference;
+ void *private;
+ /* user-space buffer: */
+ unsigned userBuffer;
+ void *userData;
+ unsigned userSize;
+} DriBufferObject;
+
+typedef struct _DriBufferList {
+ drmBOList drmBuffers; /* List of kernel buffers needing validation */
+ drmBOList driBuffers; /* List of user-space buffers needing validation */
+} DriBufferList;
+
+
+void
+bmError(int val, const char *file, const char *function, int line)
+{
+ printf("Fatal video memory manager error \"%s\".\n"
+ "Check kernel logs or set the LIBGL_DEBUG\n"
+ "environment variable to \"verbose\" for more info.\n"
+ "Detected in file %s, line %d, function %s.\n",
+ strerror(-val), file, line, function);
+#ifndef NDEBUG
+ abort();
+#else
+ abort();
+#endif
+}
+
+extern drmBO *
+driBOKernel(struct _DriBufferObject *buf)
+{
+ drmBO *ret;
+
+ driReadLockKernelBO();
+ _glthread_LOCK_MUTEX(buf->mutex);
+ assert(buf->private != NULL);
+ ret = buf->pool->kernel(buf->pool, buf->private);
+ if (!ret)
+ BM_CKFATAL(-EINVAL);
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+ driReadUnlockKernelBO();
+
+ return ret;
+}
+
+void
+driBOWaitIdle(struct _DriBufferObject *buf, int lazy)
+{
+
+ /*
+ * This function may block. Is it sane to keep the mutex held during
+ * that time??
+ */
+
+ _glthread_LOCK_MUTEX(buf->mutex);
+ BM_CKFATAL(buf->pool->waitIdle(buf->pool, buf->private, &buf->mutex, lazy));
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+}
+
+void *
+driBOMap(struct _DriBufferObject *buf, unsigned flags, unsigned hint)
+{
+ void *virtual;
+ int retval;
+
+ if (buf->userBuffer) {
+ return buf->userData;
+ }
+
+ _glthread_LOCK_MUTEX(buf->mutex);
+ assert(buf->private != NULL);
+ retval = buf->pool->map(buf->pool, buf->private, flags, hint,
+ &buf->mutex, &virtual);
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+
+ return retval == 0 ? virtual : NULL;
+}
+
+void
+driBOUnmap(struct _DriBufferObject *buf)
+{
+ if (buf->userBuffer)
+ return;
+
+ assert(buf->private != NULL);
+ _glthread_LOCK_MUTEX(buf->mutex);
+ BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private));
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+}
+
+unsigned long
+driBOOffset(struct _DriBufferObject *buf)
+{
+ unsigned long ret;
+
+ assert(buf->private != NULL);
+
+ _glthread_LOCK_MUTEX(buf->mutex);
+ ret = buf->pool->offset(buf->pool, buf->private);
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+ return ret;
+}
+
+unsigned long
+driBOPoolOffset(struct _DriBufferObject *buf)
+{
+ unsigned long ret;
+
+ assert(buf->private != NULL);
+
+ _glthread_LOCK_MUTEX(buf->mutex);
+ ret = buf->pool->poolOffset(buf->pool, buf->private);
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+ return ret;
+}
+
+uint64_t
+driBOFlags(struct _DriBufferObject *buf)
+{
+ uint64_t ret;
+
+ assert(buf->private != NULL);
+
+ driReadLockKernelBO();
+ _glthread_LOCK_MUTEX(buf->mutex);
+ ret = buf->pool->flags(buf->pool, buf->private);
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+ driReadUnlockKernelBO();
+ return ret;
+}
+
+struct _DriBufferObject *
+driBOReference(struct _DriBufferObject *buf)
+{
+ _glthread_LOCK_MUTEX(buf->mutex);
+ if (++buf->refCount == 1) {
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+ BM_CKFATAL(-EINVAL);
+ }
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+ return buf;
+}
+
+void
+driBOUnReference(struct _DriBufferObject *buf)
+{
+ int tmp;
+
+ if (!buf)
+ return;
+
+ _glthread_LOCK_MUTEX(buf->mutex);
+ tmp = --buf->refCount;
+ if (!tmp) {
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+ if (buf->private) {
+ if (buf->createdByReference)
+ buf->pool->unreference(buf->pool, buf->private);
+ else
+ buf->pool->destroy(buf->pool, buf->private);
+ }
+ if (buf->userBuffer)
+ num_user_buffers--;
+ else
+ num_buffers--;
+ free(buf);
+ } else
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+
+}
+
+
+int
+driBOData(struct _DriBufferObject *buf,
+ unsigned size, const void *data,
+ DriBufferPool *newPool,
+ uint64_t flags)
+{
+ void *virtual = NULL;
+ int newBuffer;
+ int retval = 0;
+ struct _DriBufferPool *pool;
+
+ assert(!buf->userBuffer); /* XXX just do a memcpy? */
+
+ _glthread_LOCK_MUTEX(buf->mutex);
+ pool = buf->pool;
+
+ if (pool == NULL && newPool != NULL) {
+ buf->pool = newPool;
+ pool = newPool;
+ }
+ if (newPool == NULL)
+ newPool = pool;
+
+ if (!pool->create) {
+ assert((size_t)"driBOData called on invalid buffer\n" & 0);
+ BM_CKFATAL(-EINVAL);
+ }
+
+ newBuffer = (!buf->private || pool != newPool ||
+ pool->size(pool, buf->private) < size);
+
+ if (!flags)
+ flags = buf->flags;
+
+ if (newBuffer) {
+
+ if (buf->createdByReference) {
+ assert((size_t)"driBOData requiring resizing called on shared buffer.\n" & 0);
+ BM_CKFATAL(-EINVAL);
+ }
+
+ if (buf->private)
+ buf->pool->destroy(buf->pool, buf->private);
+
+ pool = newPool;
+ buf->pool = newPool;
+ buf->private = pool->create(pool, size, flags, DRM_BO_HINT_DONT_FENCE,
+ buf->alignment);
+ if (!buf->private)
+ retval = -ENOMEM;
+
+ if (retval == 0)
+ retval = pool->map(pool, buf->private,
+ DRM_BO_FLAG_WRITE,
+ DRM_BO_HINT_DONT_BLOCK, &buf->mutex, &virtual);
+ } else if (pool->map(pool, buf->private, DRM_BO_FLAG_WRITE,
+ DRM_BO_HINT_DONT_BLOCK, &buf->mutex, &virtual)) {
+ /*
+ * Buffer is busy. Need to create a new one.
+ */
+
+ void *newBuf;
+
+ newBuf = pool->create(pool, size, flags, DRM_BO_HINT_DONT_FENCE,
+ buf->alignment);
+ if (newBuf) {
+ buf->pool->destroy(buf->pool, buf->private);
+ buf->private = newBuf;
+ }
+
+ retval = pool->map(pool, buf->private,
+ DRM_BO_FLAG_WRITE, 0, &buf->mutex, &virtual);
+ } else {
+ uint64_t flag_diff = flags ^ buf->flags;
+
+ /*
+ * We might need to change buffer flags.
+ */
+
+ if (flag_diff){
+ assert(pool->setStatus != NULL);
+ BM_CKFATAL(pool->unmap(pool, buf->private));
+ BM_CKFATAL(pool->setStatus(pool, buf->private, flag_diff,
+ buf->flags));
+ if (!data)
+ goto out;
+
+ retval = pool->map(pool, buf->private,
+ DRM_BO_FLAG_WRITE, 0, &buf->mutex, &virtual);
+ }
+ }
+
+ if (retval == 0) {
+ if (data)
+ memcpy(virtual, data, size);
+
+ BM_CKFATAL(pool->unmap(pool, buf->private));
+ }
+
+ out:
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+
+ return retval;
+}
+
+void
+driBOSubData(struct _DriBufferObject *buf,
+ unsigned long offset, unsigned long size, const void *data)
+{
+ void *virtual;
+
+ assert(!buf->userBuffer); /* XXX just do a memcpy? */
+
+ _glthread_LOCK_MUTEX(buf->mutex);
+ if (size && data) {
+ BM_CKFATAL(buf->pool->map(buf->pool, buf->private,
+ DRM_BO_FLAG_WRITE, 0, &buf->mutex,
+ &virtual));
+ memcpy((unsigned char *) virtual + offset, data, size);
+ BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private));
+ }
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+}
+
+void
+driBOGetSubData(struct _DriBufferObject *buf,
+ unsigned long offset, unsigned long size, void *data)
+{
+ void *virtual;
+
+ assert(!buf->userBuffer); /* XXX just do a memcpy? */
+
+ _glthread_LOCK_MUTEX(buf->mutex);
+ if (size && data) {
+ BM_CKFATAL(buf->pool->map(buf->pool, buf->private,
+ DRM_BO_FLAG_READ, 0, &buf->mutex, &virtual));
+ memcpy(data, (unsigned char *) virtual + offset, size);
+ BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private));
+ }
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+}
+
+void
+driBOSetReferenced(struct _DriBufferObject *buf,
+ unsigned long handle)
+{
+ _glthread_LOCK_MUTEX(buf->mutex);
+ if (buf->private != NULL) {
+ assert((size_t)"Invalid buffer for setReferenced\n" & 0);
+ BM_CKFATAL(-EINVAL);
+
+ }
+ if (buf->pool->reference == NULL) {
+ assert((size_t)"Invalid buffer pool for setReferenced\n" & 0);
+ BM_CKFATAL(-EINVAL);
+ }
+ buf->private = buf->pool->reference(buf->pool, handle);
+ if (!buf->private) {
+ assert((size_t)"Invalid buffer pool for setStatic\n" & 0);
+ BM_CKFATAL(-ENOMEM);
+ }
+ buf->createdByReference = TRUE;
+ buf->flags = buf->pool->kernel(buf->pool, buf->private)->flags;
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+}
+
+int
+driGenBuffers(struct _DriBufferPool *pool,
+ const char *name,
+ unsigned n,
+ struct _DriBufferObject *buffers[],
+ unsigned alignment, uint64_t flags, unsigned hint)
+{
+ struct _DriBufferObject *buf;
+ int i;
+
+ flags = (flags) ? flags : DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM |
+ DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE;
+
+ ++num_buffers;
+
+ assert(pool);
+
+ for (i = 0; i < n; ++i) {
+ buf = (struct _DriBufferObject *) calloc(1, sizeof(*buf));
+ if (!buf)
+ return -ENOMEM;
+
+ _glthread_INIT_MUTEX(buf->mutex);
+ _glthread_LOCK_MUTEX(buf->mutex);
+ buf->refCount = 1;
+ buf->flags = flags;
+ buf->hint = hint;
+ buf->name = name;
+ buf->alignment = alignment;
+ buf->pool = pool;
+ buf->createdByReference = 0;
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+ buffers[i] = buf;
+ }
+ return 0;
+}
+
+void
+driGenUserBuffer(struct _DriBufferPool *pool,
+ const char *name,
+ struct _DriBufferObject **buffers,
+ void *ptr, unsigned bytes)
+{
+ const unsigned alignment = 1, flags = 0, hint = 0;
+
+ --num_buffers; /* JB: is incremented in driGenBuffers */
+ driGenBuffers(pool, name, 1, buffers, alignment, flags, hint);
+ ++num_user_buffers;
+
+ (*buffers)->userBuffer = 1;
+ (*buffers)->userData = ptr;
+ (*buffers)->userSize = bytes;
+}
+
+void
+driDeleteBuffers(unsigned n, struct _DriBufferObject *buffers[])
+{
+ int i;
+
+ for (i = 0; i < n; ++i) {
+ driBOUnReference(buffers[i]);
+ }
+}
+
+
+void
+driInitBufMgr(int fd)
+{
+ ;
+}
+
+/*
+ * Note that lists are per-context and don't need mutex protection.
+ */
+
+struct _DriBufferList *
+driBOCreateList(int target)
+{
+ struct _DriBufferList *list = calloc(sizeof(*list), 1);
+
+ BM_CKFATAL(drmBOCreateList(target, &list->drmBuffers));
+ BM_CKFATAL(drmBOCreateList(target, &list->driBuffers));
+ return list;
+}
+
+int
+driBOResetList(struct _DriBufferList * list)
+{
+ int ret;
+ ret = drmBOResetList(&list->drmBuffers);
+ if (ret)
+ return ret;
+ ret = drmBOResetList(&list->driBuffers);
+ return ret;
+}
+
+void
+driBOFreeList(struct _DriBufferList * list)
+{
+ drmBOFreeList(&list->drmBuffers);
+ drmBOFreeList(&list->driBuffers);
+ free(list);
+}
+
+
+/*
+ * Copied from libdrm, because it is needed by driAddValidateItem.
+ */
+
+static drmBONode *
+driAddListItem(drmBOList * list, drmBO * item,
+ uint64_t arg0, uint64_t arg1)
+{
+ drmBONode *node;
+ drmMMListHead *l;
+
+ l = list->free.next;
+ if (l == &list->free) {
+ node = (drmBONode *) malloc(sizeof(*node));
+ if (!node) {
+ return NULL;
+ }
+ list->numCurrent++;
+ } else {
+ DRMLISTDEL(l);
+ node = DRMLISTENTRY(drmBONode, l, head);
+ }
+ memset(&node->bo_arg, 0, sizeof(node->bo_arg));
+ node->buf = item;
+ node->arg0 = arg0;
+ node->arg1 = arg1;
+ DRMLISTADDTAIL(&node->head, &list->list);
+ list->numOnList++;
+ return node;
+}
+
+/*
+ * Slightly modified version compared to the libdrm version.
+ * This one returns the list index of the buffer put on the list.
+ */
+
+static int
+driAddValidateItem(drmBOList * list, drmBO * buf, uint64_t flags,
+ uint64_t mask, int *itemLoc,
+ struct _drmBONode **pnode)
+{
+ drmBONode *node, *cur;
+ drmMMListHead *l;
+ int count = 0;
+
+ cur = NULL;
+
+ for (l = list->list.next; l != &list->list; l = l->next) {
+ node = DRMLISTENTRY(drmBONode, l, head);
+ if (node->buf == buf) {
+ cur = node;
+ break;
+ }
+ count++;
+ }
+ if (!cur) {
+ cur = driAddListItem(list, buf, flags, mask);
+ if (!cur)
+ return -ENOMEM;
+
+ cur->arg0 = flags;
+ cur->arg1 = mask;
+ } else {
+ uint64_t memFlags = cur->arg0 & flags & DRM_BO_MASK_MEM;
+ uint64_t accFlags = (cur->arg0 | flags) & ~DRM_BO_MASK_MEM;
+
+ if (mask & cur->arg1 & ~DRM_BO_MASK_MEM & (cur->arg0 ^ flags)) {
+ return -EINVAL;
+ }
+
+ cur->arg1 |= mask;
+ cur->arg0 = (cur->arg0 & ~mask) | ((memFlags | accFlags) & mask);
+
+ if (((cur->arg1 & DRM_BO_MASK_MEM) != 0) &&
+ (cur->arg0 & DRM_BO_MASK_MEM) == 0) {
+ return -EINVAL;
+ }
+ }
+ *itemLoc = count;
+ *pnode = cur;
+ return 0;
+}
+
+
+void
+driBOAddListItem(struct _DriBufferList * list, struct _DriBufferObject *buf,
+ uint64_t flags, uint64_t mask, int *itemLoc,
+ struct _drmBONode **node)
+{
+ int newItem;
+
+ _glthread_LOCK_MUTEX(buf->mutex);
+ BM_CKFATAL(driAddValidateItem(&list->drmBuffers,
+ buf->pool->kernel(buf->pool, buf->private),
+ flags, mask, itemLoc, node));
+ BM_CKFATAL(drmAddValidateItem(&list->driBuffers, (drmBO *) buf,
+ flags, mask, &newItem));
+ if (newItem)
+ buf->refCount++;
+
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+}
+
+drmBOList *driGetdrmBOList(struct _DriBufferList *list)
+{
+ driWriteLockKernelBO();
+ return &list->drmBuffers;
+}
+
+void driPutdrmBOList(struct _DriBufferList *list)
+{
+ driWriteUnlockKernelBO();
+}
+
+
+void
+driBOFence(struct _DriBufferObject *buf, struct _DriFenceObject *fence)
+{
+ _glthread_LOCK_MUTEX(buf->mutex);
+ if (buf->pool->fence)
+ BM_CKFATAL(buf->pool->fence(buf->pool, buf->private, fence));
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+
+}
+
+void
+driBOUnrefUserList(struct _DriBufferList *list)
+{
+ struct _DriBufferObject *buf;
+ void *curBuf;
+
+ curBuf = drmBOListIterator(&list->driBuffers);
+ while (curBuf) {
+ buf = (struct _DriBufferObject *)drmBOListBuf(curBuf);
+ driBOUnReference(buf);
+ curBuf = drmBOListNext(&list->driBuffers, curBuf);
+ }
+}
+
+struct _DriFenceObject *
+driBOFenceUserList(struct _DriFenceMgr *mgr,
+ struct _DriBufferList *list, const char *name,
+ drmFence *kFence)
+{
+ struct _DriFenceObject *fence;
+ struct _DriBufferObject *buf;
+ void *curBuf;
+
+ fence = driFenceCreate(mgr, kFence->fence_class, kFence->type,
+ kFence, sizeof(*kFence));
+ curBuf = drmBOListIterator(&list->driBuffers);
+
+ /*
+ * User-space fencing callbacks.
+ */
+
+ while (curBuf) {
+ buf = (struct _DriBufferObject *) drmBOListBuf(curBuf);
+ driBOFence(buf, fence);
+ driBOUnReference(buf);
+ curBuf = drmBOListNext(&list->driBuffers, curBuf);
+ }
+
+ driBOResetList(list);
+ return fence;
+}
+
+void
+driBOValidateUserList(struct _DriBufferList * list)
+{
+ void *curBuf;
+ struct _DriBufferObject *buf;
+
+ curBuf = drmBOListIterator(&list->driBuffers);
+
+ /*
+ * User-space validation callbacks.
+ */
+
+ while (curBuf) {
+ buf = (struct _DriBufferObject *) drmBOListBuf(curBuf);
+ _glthread_LOCK_MUTEX(buf->mutex);
+ if (buf->pool->validate)
+ BM_CKFATAL(buf->pool->validate(buf->pool, buf->private, &buf->mutex));
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+ curBuf = drmBOListNext(&list->driBuffers, curBuf);
+ }
+}
+
+
+void
+driPoolTakeDown(struct _DriBufferPool *pool)
+{
+ pool->takeDown(pool);
+
+}
+
+unsigned long
+driBOSize(struct _DriBufferObject *buf)
+{
+ unsigned long size;
+
+ _glthread_LOCK_MUTEX(buf->mutex);
+ size = buf->pool->size(buf->pool, buf->private);
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+
+ return size;
+
+}
+
+drmBOList *driBOGetDRMBuffers(struct _DriBufferList *list)
+{
+ return &list->drmBuffers;
+}
+
+drmBOList *driBOGetDRIBuffers(struct _DriBufferList *list)
+{
+ return &list->driBuffers;
+}
+
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
+ */
+
+#ifndef _PSB_BUFMGR_H_
+#define _PSB_BUFMGR_H_
+#include <xf86mm.h>
+#include "i915_drm.h"
+#include "ws_dri_fencemgr.h"
+
+typedef struct _drmBONode
+{
+ drmMMListHead head;
+ drmBO *buf;
+ struct drm_i915_op_arg bo_arg;
+ uint64_t arg0;
+ uint64_t arg1;
+} drmBONode;
+
+typedef struct _drmBOList {
+ unsigned numTarget;
+ unsigned numCurrent;
+ unsigned numOnList;
+ drmMMListHead list;
+ drmMMListHead free;
+} drmBOList;
+
+
+struct _DriFenceObject;
+struct _DriBufferObject;
+struct _DriBufferPool;
+struct _DriBufferList;
+
+/*
+ * Return a pointer to the libdrm buffer object this DriBufferObject
+ * uses.
+ */
+
+extern drmBO *driBOKernel(struct _DriBufferObject *buf);
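+/*
+ * Usage sketch (illustrative only): the returned drmBO is typically
+ * examined while holding the kernel BO read lock, e.g.
+ *
+ *   driReadLockKernelBO();
+ *   bo = driBOKernel(buf);
+ *   presumed_offset = bo->offset;
+ *   driReadUnlockKernelBO();
+ */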
+extern void *driBOMap(struct _DriBufferObject *buf, unsigned flags,
+ unsigned hint);
+extern void driBOUnmap(struct _DriBufferObject *buf);
+extern unsigned long driBOOffset(struct _DriBufferObject *buf);
+extern unsigned long driBOPoolOffset(struct _DriBufferObject *buf);
+
+extern uint64_t driBOFlags(struct _DriBufferObject *buf);
+extern struct _DriBufferObject *driBOReference(struct _DriBufferObject *buf);
+extern void driBOUnReference(struct _DriBufferObject *buf);
+
+extern int driBOData(struct _DriBufferObject *r_buf,
+ unsigned size, const void *data,
+ struct _DriBufferPool *pool, uint64_t flags);
+
+extern void driBOSubData(struct _DriBufferObject *buf,
+ unsigned long offset, unsigned long size,
+ const void *data);
+extern void driBOGetSubData(struct _DriBufferObject *buf,
+ unsigned long offset, unsigned long size,
+ void *data);
+extern int driGenBuffers(struct _DriBufferPool *pool,
+ const char *name,
+ unsigned n,
+ struct _DriBufferObject *buffers[],
+ unsigned alignment, uint64_t flags, unsigned hint);
+extern void driGenUserBuffer(struct _DriBufferPool *pool,
+ const char *name,
+ struct _DriBufferObject *buffers[],
+ void *ptr, unsigned bytes);
+extern void driDeleteBuffers(unsigned n, struct _DriBufferObject *buffers[]);
+extern void driInitBufMgr(int fd);
+extern struct _DriBufferList *driBOCreateList(int target);
+extern int driBOResetList(struct _DriBufferList * list);
+extern void driBOAddListItem(struct _DriBufferList * list,
+ struct _DriBufferObject *buf,
+ uint64_t flags, uint64_t mask, int *itemLoc,
+ struct _drmBONode **node);
+
+extern void driBOValidateList(int fd, struct _DriBufferList * list);
+extern void driBOFreeList(struct _DriBufferList * list);
+extern struct _DriFenceObject *driBOFenceUserList(struct _DriFenceMgr *mgr,
+ struct _DriBufferList *list,
+ const char *name,
+ drmFence *kFence);
+extern void driBOUnrefUserList(struct _DriBufferList *list);
+extern void driBOValidateUserList(struct _DriBufferList * list);
+extern drmBOList *driGetdrmBOList(struct _DriBufferList *list);
+extern void driPutdrmBOList(struct _DriBufferList *list);
+
+extern void driBOFence(struct _DriBufferObject *buf,
+ struct _DriFenceObject *fence);
+
+extern void driPoolTakeDown(struct _DriBufferPool *pool);
+extern void driBOSetReferenced(struct _DriBufferObject *buf,
+ unsigned long handle);
+extern unsigned long driBOSize(struct _DriBufferObject *buf);
+extern void driBOWaitIdle(struct _DriBufferObject *buf, int lazy);
+
+extern void driReadLockKernelBO(void);
+extern void driReadUnlockKernelBO(void);
+extern void driWriteLockKernelBO(void);
+extern void driWriteUnlockKernelBO(void);
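+/*
+ * Sketch of the intended pairing (see driGetdrmBOList / driPutdrmBOList):
+ * driGetdrmBOList() takes the kernel BO write lock and driPutdrmBOList()
+ * releases it, so a caller validating a list does roughly
+ *
+ *   drmBOList *kList = driGetdrmBOList(list);
+ *   ... hand kList to the kernel for validation ...
+ *   driPutdrmBOList(list);
+ */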
+
+/*
+ * For debugging purposes.
+ */
+
+extern drmBOList *driBOGetDRMBuffers(struct _DriBufferList *list);
+extern drmBOList *driBOGetDRIBuffers(struct _DriBufferList *list);
+#endif
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef _PSB_BUFPOOL_H_
+#define _PSB_BUFPOOL_H_
+
+#include <xf86drm.h>
+#include <glthread.h>
+struct _DriFenceObject;
+
+typedef struct _DriBufferPool
+{
+ int fd;
+ int (*map) (struct _DriBufferPool * pool, void *private,
+ unsigned flags, int hint, _glthread_Mutex *mutex,
+ void **virtual);
+ int (*unmap) (struct _DriBufferPool * pool, void *private);
+ int (*destroy) (struct _DriBufferPool * pool, void *private);
+ unsigned long (*offset) (struct _DriBufferPool * pool, void *private);
+ unsigned long (*poolOffset) (struct _DriBufferPool * pool, void *private);
+ uint64_t (*flags) (struct _DriBufferPool * pool, void *private);
+ unsigned long (*size) (struct _DriBufferPool * pool, void *private);
+ void *(*create) (struct _DriBufferPool * pool, unsigned long size,
+ uint64_t flags, unsigned hint, unsigned alignment);
+ void *(*reference) (struct _DriBufferPool * pool, unsigned handle);
+ int (*unreference) (struct _DriBufferPool * pool, void *private);
+ int (*fence) (struct _DriBufferPool * pool, void *private,
+ struct _DriFenceObject * fence);
+ drmBO *(*kernel) (struct _DriBufferPool * pool, void *private);
+ int (*validate) (struct _DriBufferPool * pool, void *private, _glthread_Mutex *mutex);
+ int (*waitIdle) (struct _DriBufferPool *pool, void *private, _glthread_Mutex *mutex,
+ int lazy);
+ int (*setStatus) (struct _DriBufferPool *pool, void *private,
+ uint64_t flag_diff, uint64_t old_flags);
+ void (*takeDown) (struct _DriBufferPool * pool);
+ void *data;
+} DriBufferPool;
+
+extern void bmError(int val, const char *file, const char *function,
+ int line);
+#define BM_CKFATAL(val) \
+ do{ \
+ int tstVal = (val); \
+ if (tstVal) \
+ bmError(tstVal, __FILE__, __FUNCTION__, __LINE__); \
+ } while(0)
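+/*
+ * Note: a pool implementation may leave optional hooks such as "validate"
+ * and "setStatus" NULL; callers are expected to check before invoking
+ * them, e.g. (sketch)
+ *
+ *   if (pool->validate)
+ *      BM_CKFATAL(pool->validate(pool, buf->private, &buf->mutex));
+ */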
+
+
+/*
+ * Builtin pools.
+ */
+
+/*
+ * Kernel buffer objects. Size in multiples of page size. Page size aligned.
+ */
+
+extern struct _DriBufferPool *driDRMPoolInit(int fd);
+extern struct _DriBufferPool *driMallocPoolInit(void);
+
+struct _DriFreeSlabManager;
+extern struct _DriBufferPool * driSlabPoolInit(int fd, uint64_t flags,
+ uint64_t validMask,
+ uint32_t smallestSize,
+ uint32_t numSizes,
+ uint32_t desiredNumBuffers,
+ uint32_t maxSlabSize,
+ uint32_t pageAlignment,
+ struct _DriFreeSlabManager *fMan);
+extern void driFinishFreeSlabManager(struct _DriFreeSlabManager *fMan);
+extern struct _DriFreeSlabManager *
+driInitFreeSlabManager(uint32_t checkIntervalMsec, uint32_t slabTimeoutMsec);
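+/*
+ * Typical setup/teardown order (sketch; the numeric arguments below are
+ * arbitrary example values, not recommendations):
+ *
+ *   struct _DriFreeSlabManager *fMan =
+ *      driInitFreeSlabManager(250, 1000);
+ *   struct _DriBufferPool *pool =
+ *      driSlabPoolInit(fd,
+ *                      DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
+ *                      DRM_BO_FLAG_MEM_TT,
+ *                      DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
+ *                      DRM_BO_FLAG_MEM_TT,
+ *                      4096, 6, 16, 4096 * 16, 0, fMan);
+ *   ...
+ *   pool->takeDown(pool);
+ *   driFinishFreeSlabManager(fMan);
+ */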
+
+
+#endif
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include <xf86drm.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include "ws_dri_bufpool.h"
+#include "ws_dri_bufmgr.h"
+#include "assert.h"
+
+/*
+ * Buffer pool implementation using DRM buffer objects as DRI buffer objects.
+ */
+
+static void *
+pool_create(struct _DriBufferPool *pool,
+ unsigned long size, uint64_t flags, unsigned hint,
+ unsigned alignment)
+{
+ drmBO *buf = (drmBO *) malloc(sizeof(*buf));
+ int ret;
+ unsigned pageSize = getpagesize();
+
+ if (!buf)
+ return NULL;
+
+ if ((alignment > pageSize) && (alignment % pageSize)) {
+ free(buf);
+ return NULL;
+ }
+
+ ret = drmBOCreate(pool->fd, size, alignment / pageSize,
+ NULL,
+ flags, hint, buf);
+ if (ret) {
+ free(buf);
+ return NULL;
+ }
+
+ return (void *) buf;
+}
+
+static void *
+pool_reference(struct _DriBufferPool *pool, unsigned handle)
+{
+ drmBO *buf = (drmBO *) malloc(sizeof(*buf));
+ int ret;
+
+ if (!buf)
+ return NULL;
+
+ ret = drmBOReference(pool->fd, handle, buf);
+
+ if (ret) {
+ free(buf);
+ return NULL;
+ }
+
+ return (void *) buf;
+}
+
+static int
+pool_destroy(struct _DriBufferPool *pool, void *private)
+{
+ int ret;
+ drmBO *buf = (drmBO *) private;
+ driReadLockKernelBO();
+ ret = drmBOUnreference(pool->fd, buf);
+ free(buf);
+ driReadUnlockKernelBO();
+ return ret;
+}
+
+static int
+pool_unreference(struct _DriBufferPool *pool, void *private)
+{
+ int ret;
+ drmBO *buf = (drmBO *) private;
+ driReadLockKernelBO();
+ ret = drmBOUnreference(pool->fd, buf);
+ free(buf);
+ driReadUnlockKernelBO();
+ return ret;
+}
+
+static int
+pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
+ int hint, _glthread_Mutex *mutex, void **virtual)
+{
+ drmBO *buf = (drmBO *) private;
+ int ret;
+
+ driReadLockKernelBO();
+ ret = drmBOMap(pool->fd, buf, flags, hint, virtual);
+ driReadUnlockKernelBO();
+ return ret;
+}
+
+static int
+pool_unmap(struct _DriBufferPool *pool, void *private)
+{
+ drmBO *buf = (drmBO *) private;
+ int ret;
+
+ driReadLockKernelBO();
+ ret = drmBOUnmap(pool->fd, buf);
+ driReadUnlockKernelBO();
+
+ return ret;
+}
+
+static unsigned long
+pool_offset(struct _DriBufferPool *pool, void *private)
+{
+ drmBO *buf = (drmBO *) private;
+ unsigned long offset;
+
+ driReadLockKernelBO();
+ assert(buf->flags & DRM_BO_FLAG_NO_MOVE);
+ offset = buf->offset;
+ driReadUnlockKernelBO();
+
+ return offset;
+}
+
+static unsigned long
+pool_poolOffset(struct _DriBufferPool *pool, void *private)
+{
+ return 0;
+}
+
+static uint64_t
+pool_flags(struct _DriBufferPool *pool, void *private)
+{
+ drmBO *buf = (drmBO *) private;
+ uint64_t flags;
+
+ driReadLockKernelBO();
+ flags = buf->flags;
+ driReadUnlockKernelBO();
+
+ return flags;
+}
+
+
+static unsigned long
+pool_size(struct _DriBufferPool *pool, void *private)
+{
+ drmBO *buf = (drmBO *) private;
+ unsigned long size;
+
+ driReadLockKernelBO();
+ size = buf->size;
+ driReadUnlockKernelBO();
+
+ return size;
+}
+
+static int
+pool_fence(struct _DriBufferPool *pool, void *private,
+ struct _DriFenceObject *fence)
+{
+ /*
+ * Noop. The kernel handles all fencing.
+ */
+
+ return 0;
+}
+
+static drmBO *
+pool_kernel(struct _DriBufferPool *pool, void *private)
+{
+ return (drmBO *) private;
+}
+
+static int
+pool_waitIdle(struct _DriBufferPool *pool, void *private, _glthread_Mutex *mutex,
+ int lazy)
+{
+ drmBO *buf = (drmBO *) private;
+ int ret;
+
+ driReadLockKernelBO();
+ ret = drmBOWaitIdle(pool->fd, buf, (lazy) ? DRM_BO_HINT_WAIT_LAZY:0);
+ driReadUnlockKernelBO();
+
+ return ret;
+}
+
+
+static void
+pool_takedown(struct _DriBufferPool *pool)
+{
+ free(pool);
+}
+
+/*static int
+pool_setStatus(struct _DriBufferPool *pool, void *private,
+ uint64_t flag_diff, uint64_t old_flags)
+{
+ drmBO *buf = (drmBO *) private;
+ uint64_t new_flags = old_flags ^ flag_diff;
+ int ret;
+
+ driReadLockKernelBO();
+ ret = drmBOSetStatus(pool->fd, buf, new_flags, flag_diff,
+ 0, 0, 0);
+ driReadUnlockKernelBO();
+ return ret;
+}*/
+
+struct _DriBufferPool *
+driDRMPoolInit(int fd)
+{
+ struct _DriBufferPool *pool;
+
+ pool = (struct _DriBufferPool *) malloc(sizeof(*pool));
+
+ if (!pool)
+ return NULL;
+
+ pool->fd = fd;
+ pool->map = &pool_map;
+ pool->unmap = &pool_unmap;
+ pool->destroy = &pool_destroy;
+ pool->offset = &pool_offset;
+ pool->poolOffset = &pool_poolOffset;
+ pool->flags = &pool_flags;
+ pool->size = &pool_size;
+ pool->create = &pool_create;
+ pool->fence = &pool_fence;
+ pool->kernel = &pool_kernel;
+ pool->validate = NULL;
+ pool->waitIdle = &pool_waitIdle;
+ pool->takeDown = &pool_takedown;
+ pool->reference = &pool_reference;
+ pool->unreference = &pool_unreference;
+ pool->setStatus = NULL;
+ pool->data = NULL;
+ return pool;
+}
--- /dev/null
+#include "ws_dri_fencemgr.h"
+#include "glthread.h"
+#include <xf86mm.h>
+#include <string.h>
+#include <unistd.h>
+
+/*
+ * Note: Locking order is
+ * _DriFenceObject::mutex
+ * _DriFenceMgr::mutex
+ */
+
+struct _DriFenceMgr {
+ /*
+ * Constant members. Need no mutex protection.
+ */
+ struct _DriFenceMgrCreateInfo info;
+ void *private;
+
+ /*
+ * These members are protected by this->mutex
+ */
+ _glthread_Mutex mutex;
+ int refCount;
+ drmMMListHead *heads;
+ int num_fences;
+};
+
+struct _DriFenceObject {
+
+ /*
+ * These members are constant and need no mutex protection.
+ */
+ struct _DriFenceMgr *mgr;
+ uint32_t fence_class;
+ uint32_t fence_type;
+
+ /*
+ * These members are protected by mgr->mutex.
+ */
+ drmMMListHead head;
+ int refCount;
+
+ /*
+ * These members are protected by this->mutex.
+ */
+ _glthread_Mutex mutex;
+ uint32_t signaled_type;
+ void *private;
+};
+
+uint32_t
+driFenceType(struct _DriFenceObject *fence)
+{
+ return fence->fence_type;
+}
+
+struct _DriFenceMgr *
+driFenceMgrCreate(const struct _DriFenceMgrCreateInfo *info)
+{
+ struct _DriFenceMgr *tmp;
+ uint32_t i;
+
+ tmp = calloc(1, sizeof(*tmp));
+ if (!tmp)
+ return NULL;
+
+ _glthread_INIT_MUTEX(tmp->mutex);
+ _glthread_LOCK_MUTEX(tmp->mutex);
+ tmp->refCount = 1;
+ tmp->info = *info;
+ tmp->num_fences = 0;
+ tmp->heads = calloc(tmp->info.num_classes, sizeof(*tmp->heads));
+ if (!tmp->heads)
+ goto out_err;
+
+ for (i=0; i<tmp->info.num_classes; ++i) {
+ DRMINITLISTHEAD(&tmp->heads[i]);
+ }
+ _glthread_UNLOCK_MUTEX(tmp->mutex);
+ return tmp;
+
+ out_err:
+ if (tmp)
+ free(tmp);
+ return NULL;
+}
+
+static void
+driFenceMgrUnrefUnlock(struct _DriFenceMgr **pMgr)
+{
+ struct _DriFenceMgr *mgr = *pMgr;
+
+ *pMgr = NULL;
+ if (--mgr->refCount == 0)
+ free(mgr);
+ else
+ _glthread_UNLOCK_MUTEX(mgr->mutex);
+}
+
+void
+driFenceMgrUnReference(struct _DriFenceMgr **pMgr)
+{
+ _glthread_LOCK_MUTEX((*pMgr)->mutex);
+ driFenceMgrUnrefUnlock(pMgr);
+}
+
+static void
+driFenceUnReferenceLocked(struct _DriFenceObject **pFence)
+{
+ struct _DriFenceObject *fence = *pFence;
+ struct _DriFenceMgr *mgr = fence->mgr;
+
+ *pFence = NULL;
+ if (--fence->refCount == 0) {
+ DRMLISTDELINIT(&fence->head);
+ if (fence->private)
+ mgr->info.unreference(mgr, &fence->private);
+ --mgr->num_fences;
+ fence->mgr = NULL;
+ --mgr->refCount;
+ free(fence);
+
+ }
+}
+
+
+static void
+driSignalPreviousFencesLocked(struct _DriFenceMgr *mgr,
+ drmMMListHead *list,
+ uint32_t fence_class,
+ uint32_t fence_type)
+{
+ struct _DriFenceObject *entry;
+ drmMMListHead *prev;
+
+ while(list != &mgr->heads[fence_class]) {
+ entry = DRMLISTENTRY(struct _DriFenceObject, list, head);
+
+ /*
+ * Up refcount so that entry doesn't disappear from under us
+ * when we unlock-relock mgr to get the correct locking order.
+ */
+
+ ++entry->refCount;
+ _glthread_UNLOCK_MUTEX(mgr->mutex);
+ _glthread_LOCK_MUTEX(entry->mutex);
+ _glthread_LOCK_MUTEX(mgr->mutex);
+
+ prev = list->prev;
+
+ if (list->prev == list) {
+
+ /*
+ * Somebody else removed the entry from the list.
+ */
+
+ _glthread_UNLOCK_MUTEX(entry->mutex);
+ driFenceUnReferenceLocked(&entry);
+ return;
+ }
+
+ entry->signaled_type |= (fence_type & entry->fence_type);
+ if (entry->signaled_type == entry->fence_type) {
+ DRMLISTDELINIT(list);
+ mgr->info.unreference(mgr, &entry->private);
+ }
+ _glthread_UNLOCK_MUTEX(entry->mutex);
+ driFenceUnReferenceLocked(&entry);
+ list = prev;
+ }
+}
+
+
+int
+driFenceFinish(struct _DriFenceObject *fence, uint32_t fence_type,
+ int lazy_hint)
+{
+ struct _DriFenceMgr *mgr = fence->mgr;
+ int ret = 0;
+
+ _glthread_LOCK_MUTEX(fence->mutex);
+
+ if ((fence->signaled_type & fence_type) == fence_type)
+ goto out0;
+
+ ret = mgr->info.finish(mgr, fence->private, fence_type, lazy_hint);
+ if (ret)
+ goto out0;
+
+ _glthread_LOCK_MUTEX(mgr->mutex);
+ _glthread_UNLOCK_MUTEX(fence->mutex);
+
+ driSignalPreviousFencesLocked(mgr, &fence->head, fence->fence_class,
+ fence_type);
+ _glthread_UNLOCK_MUTEX(mgr->mutex);
+ return 0;
+
+ out0:
+ _glthread_UNLOCK_MUTEX(fence->mutex);
+ return ret;
+}
+
+uint32_t driFenceSignaledTypeCached(struct _DriFenceObject *fence)
+{
+ uint32_t ret;
+
+ _glthread_LOCK_MUTEX(fence->mutex);
+ ret = fence->signaled_type;
+ _glthread_UNLOCK_MUTEX(fence->mutex);
+
+ return ret;
+}
+
+int
+driFenceSignaledType(struct _DriFenceObject *fence, uint32_t flush_type,
+ uint32_t *signaled)
+{
+ int ret = 0;
+ struct _DriFenceMgr *mgr;
+
+ _glthread_LOCK_MUTEX(fence->mutex);
+ mgr = fence->mgr;
+ *signaled = fence->signaled_type;
+ if ((fence->signaled_type & flush_type) == flush_type)
+ goto out0;
+
+ ret = mgr->info.signaled(mgr, fence->private, flush_type, signaled);
+ if (ret) {
+ *signaled = fence->signaled_type;
+ goto out0;
+ }
+
+ if ((fence->signaled_type | *signaled) == fence->signaled_type)
+ goto out0;
+
+ _glthread_LOCK_MUTEX(mgr->mutex);
+ _glthread_UNLOCK_MUTEX(fence->mutex);
+
+ driSignalPreviousFencesLocked(mgr, &fence->head, fence->fence_class,
+ *signaled);
+
+ _glthread_UNLOCK_MUTEX(mgr->mutex);
+ return 0;
+ out0:
+ _glthread_UNLOCK_MUTEX(fence->mutex);
+ return ret;
+}
+
+struct _DriFenceObject *
+driFenceReference(struct _DriFenceObject *fence)
+{
+ _glthread_LOCK_MUTEX(fence->mgr->mutex);
+ ++fence->refCount;
+ _glthread_UNLOCK_MUTEX(fence->mgr->mutex);
+ return fence;
+}
+
+void
+driFenceUnReference(struct _DriFenceObject **pFence)
+{
+ struct _DriFenceMgr *mgr;
+
+ if (*pFence == NULL)
+ return;
+
+ mgr = (*pFence)->mgr;
+ _glthread_LOCK_MUTEX(mgr->mutex);
+ ++mgr->refCount;
+ driFenceUnReferenceLocked(pFence);
+ driFenceMgrUnrefUnlock(&mgr);
+}
+
+struct _DriFenceObject
+*driFenceCreate(struct _DriFenceMgr *mgr, uint32_t fence_class,
+ uint32_t fence_type, void *private, size_t private_size)
+{
+ struct _DriFenceObject *fence;
+ size_t fence_size = sizeof(*fence);
+
+ if (private_size)
+ fence_size = ((fence_size + 15) & ~15);
+
+ fence = calloc(1, fence_size + private_size);
+
+ if (!fence) {
+ int ret = mgr->info.finish(mgr, private, fence_type, 0);
+
+ if (ret)
+ usleep(10000000);
+
+ return NULL;
+ }
+
+ _glthread_INIT_MUTEX(fence->mutex);
+ _glthread_LOCK_MUTEX(fence->mutex);
+ _glthread_LOCK_MUTEX(mgr->mutex);
+ fence->refCount = 1;
+ DRMLISTADDTAIL(&fence->head, &mgr->heads[fence_class]);
+ fence->mgr = mgr;
+ ++mgr->refCount;
+ ++mgr->num_fences;
+ _glthread_UNLOCK_MUTEX(mgr->mutex);
+ fence->fence_class = fence_class;
+ fence->fence_type = fence_type;
+ fence->signaled_type = 0;
+ fence->private = private;
+ if (private_size) {
+ fence->private = (void *)(((uint8_t *) fence) + fence_size);
+ memcpy(fence->private, private, private_size);
+ }
+
+ _glthread_UNLOCK_MUTEX(fence->mutex);
+ return fence;
+}
+
+
+static int
+tSignaled(struct _DriFenceMgr *mgr, void *private, uint32_t flush_type,
+ uint32_t *signaled_type)
+{
+ long fd = (long) mgr->private;
+ int dummy;
+ drmFence *fence = (drmFence *) private;
+ int ret;
+
+ *signaled_type = 0;
+ ret = drmFenceSignaled((int) fd, fence, flush_type, &dummy);
+ if (ret)
+ return ret;
+
+ *signaled_type = fence->signaled;
+
+ return 0;
+}
+
+static int
+tFinish(struct _DriFenceMgr *mgr, void *private, uint32_t fence_type,
+ int lazy_hint)
+{
+ long fd = (long) mgr->private;
+ unsigned flags = lazy_hint ? DRM_FENCE_FLAG_WAIT_LAZY : 0;
+
+ return drmFenceWait((int)fd, flags, (drmFence *) private, fence_type);
+}
+
+static int
+tUnref(struct _DriFenceMgr *mgr, void **private)
+{
+ long fd = (long) mgr->private;
+ drmFence *fence = (drmFence *) *private;
+ *private = NULL;
+
+ return drmFenceUnreference((int) fd, fence);
+}
+
+struct _DriFenceMgr *driFenceMgrTTMInit(int fd)
+{
+ struct _DriFenceMgrCreateInfo info;
+ struct _DriFenceMgr *mgr;
+
+ info.flags = DRI_FENCE_CLASS_ORDERED;
+ info.num_classes = 4;
+ info.signaled = tSignaled;
+ info.finish = tFinish;
+ info.unreference = tUnref;
+
+ mgr = driFenceMgrCreate(&info);
+ if (mgr == NULL)
+ return NULL;
+
+ mgr->private = (void *) (long) fd;
+ return mgr;
+}
+
--- /dev/null
+#ifndef DRI_FENCEMGR_H
+#define DRI_FENCEMGR_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+struct _DriFenceObject;
+struct _DriFenceMgr;
+
+/*
+ * Do a quick check to see if the fence manager has registered the fence
+ * object as signaled. Note that this function may return a false negative
+ * answer.
+ */
+extern uint32_t driFenceSignaledTypeCached(struct _DriFenceObject *fence);
+
+/*
+ * Check if the fence object is signaled. This function can be substantially
+ * more expensive to call than the above function, but will not return a false
+ * negative answer. The argument "flush_type" sets the types that the
+ * underlying mechanism must make sure will eventually signal.
+ */
+extern int driFenceSignaledType(struct _DriFenceObject *fence,
+ uint32_t flush_type, uint32_t *signaled);
+
+/*
+ * Convenience functions.
+ */
+
+static inline int driFenceSignaled(struct _DriFenceObject *fence,
+ uint32_t flush_type)
+{
+ uint32_t signaled_types;
+ int ret = driFenceSignaledType(fence, flush_type, &signaled_types);
+ if (ret)
+ return 0;
+ return ((signaled_types & flush_type) == flush_type);
+}
+
+static inline int driFenceSignaledCached(struct _DriFenceObject *fence,
+ uint32_t flush_type)
+{
+ uint32_t signaled_types =
+ driFenceSignaledTypeCached(fence);
+
+ return ((signaled_types & flush_type) == flush_type);
+}
+
+/*
+ * Reference a fence object.
+ */
+extern struct _DriFenceObject *driFenceReference(struct _DriFenceObject *fence);
+
+/*
+ * Unreference a fence object. The fence object pointer will be reset to NULL.
+ */
+
+extern void driFenceUnReference(struct _DriFenceObject **pFence);
+
+
+/*
+ * Wait for a fence to signal the indicated fence_type.
+ * If "lazy_hint" is true, it indicates that the wait may sleep to avoid
+ * busy-wait polling.
+ */
+extern int driFenceFinish(struct _DriFenceObject *fence, uint32_t fence_type,
+ int lazy_hint);
+
+/*
+ * Create a DriFenceObject for manager "mgr".
+ *
+ * "private" is a pointer that should be used for the callbacks in
+ * struct _DriFenceMgrCreateInfo.
+ *
+ * If private_size is nonzero, the data stored at *private (of size
+ * private_size) will be copied, and the fence manager will instead use a
+ * pointer to the copied data for the callbacks in
+ * struct _DriFenceMgrCreateInfo. In that case, the object pointed to by
+ * "private" may be destroyed after the call to driFenceCreate.
+ */
+extern struct _DriFenceObject *driFenceCreate(struct _DriFenceMgr *mgr,
+ uint32_t fence_class,
+ uint32_t fence_type,
+ void *private,
+ size_t private_size);
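+/*
+ * Usage sketch (illustrative; "kFence" is assumed to be a drmFence the
+ * driver has already emitted):
+ *
+ *   fence = driFenceCreate(mgr, kFence.fence_class, kFence.type,
+ *                          &kFence, sizeof(kFence));
+ *   ...
+ *   if (!driFenceSignaled(fence, fence_type))
+ *      (void) driFenceFinish(fence, fence_type, 1);
+ *   driFenceUnReference(&fence);
+ */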
+
+extern uint32_t driFenceType(struct _DriFenceObject *fence);
+
+/*
+ * Fence creations are ordered. If a fence signals a fence_type,
+ * it is safe to assume that all fences of the same class that were
+ * created before that fence have signaled the same type.
+ */
+
+#define DRI_FENCE_CLASS_ORDERED (1 << 0)
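+/*
+ * For example, in an ordered class, once a fence created later has
+ * signaled DRM_FENCE_TYPE_EXE, every fence created earlier in that class
+ * may be treated as having signaled DRM_FENCE_TYPE_EXE as well; the fence
+ * manager relies on this when it walks and signals previous fences.
+ */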
+
+struct _DriFenceMgrCreateInfo {
+ uint32_t flags;
+ uint32_t num_classes;
+ int (*signaled) (struct _DriFenceMgr *mgr, void *private, uint32_t flush_type,
+ uint32_t *signaled_type);
+ int (*finish) (struct _DriFenceMgr *mgr, void *private, uint32_t fence_type, int lazy_hint);
+ int (*unreference) (struct _DriFenceMgr *mgr, void **private);
+};
+
+extern struct _DriFenceMgr *
+driFenceMgrCreate(const struct _DriFenceMgrCreateInfo *info);
+
+void
+driFenceMgrUnReference(struct _DriFenceMgr **pMgr);
+
+extern struct _DriFenceMgr *
+driFenceMgrTTMInit(int fd);
+
+#endif
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include <xf86drm.h>
+#include <stdlib.h>
+#include <errno.h>
+#include "pipe/p_debug.h"
+#include "glthread.h"
+#include "ws_dri_bufpool.h"
+#include "ws_dri_bufmgr.h"
+
+static void *
+pool_create(struct _DriBufferPool *pool,
+ unsigned long size, uint64_t flags, unsigned hint,
+ unsigned alignment)
+{
+ unsigned long *private = malloc(size + 2*sizeof(unsigned long));
+
+ if (!private)
+ return NULL;
+
+ if ((flags & DRM_BO_MASK_MEM) != DRM_BO_FLAG_MEM_LOCAL)
+ abort();
+
+ *private = size;
+ return (void *)private;
+}
+
+
+static int
+pool_destroy(struct _DriBufferPool *pool, void *private)
+{
+ free(private);
+ return 0;
+}
+
+static int
+pool_waitIdle(struct _DriBufferPool *pool, void *private,
+ _glthread_Mutex *mutex, int lazy)
+{
+ return 0;
+}
+
+static int
+pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
+ int hint, _glthread_Mutex *mutex, void **virtual)
+{
+ *virtual = (void *)((unsigned long *)private + 2);
+ return 0;
+}
+
+static int
+pool_unmap(struct _DriBufferPool *pool, void *private)
+{
+ return 0;
+}
+
+static unsigned long
+pool_offset(struct _DriBufferPool *pool, void *private)
+{
+ /*
+ * BUG: a malloc pool buffer has no GPU offset; this must never be called.
+ */
+ abort();
+ return 0UL;
+}
+
+static unsigned long
+pool_poolOffset(struct _DriBufferPool *pool, void *private)
+{
+ /*
+ * BUG: this must never be called for a malloc pool buffer.
+ */
+ abort();
+ return 0UL;
+}
+
+static uint64_t
+pool_flags(struct _DriBufferPool *pool, void *private)
+{
+ return DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
+}
+
+static unsigned long
+pool_size(struct _DriBufferPool *pool, void *private)
+{
+ return *(unsigned long *) private;
+}
+
+
+static int
+pool_fence(struct _DriBufferPool *pool, void *private,
+ struct _DriFenceObject *fence)
+{
+ abort();
+ return 0UL;
+}
+
+static drmBO *
+pool_kernel(struct _DriBufferPool *pool, void *private)
+{
+ abort();
+ return NULL;
+}
+
+static void
+pool_takedown(struct _DriBufferPool *pool)
+{
+ free(pool);
+}
+
+
+struct _DriBufferPool *
+driMallocPoolInit(void)
+{
+ struct _DriBufferPool *pool;
+
+ pool = (struct _DriBufferPool *) malloc(sizeof(*pool));
+ if (!pool)
+ return NULL;
+
+ pool->data = NULL;
+ pool->fd = -1;
+ pool->map = &pool_map;
+ pool->unmap = &pool_unmap;
+ pool->destroy = &pool_destroy;
+ pool->offset = &pool_offset;
+ pool->poolOffset = &pool_poolOffset;
+ pool->flags = &pool_flags;
+ pool->size = &pool_size;
+ pool->create = &pool_create;
+ pool->fence = &pool_fence;
+ pool->kernel = &pool_kernel;
+ pool->validate = NULL;
+ pool->waitIdle = &pool_waitIdle;
+ pool->takeDown = &pool_takedown;
+ pool->reference = NULL;
+ pool->unreference = NULL;
+ pool->setStatus = NULL;
+ return pool;
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include <stdint.h>
+#include <sys/time.h>
+#include <errno.h>
+#include <unistd.h>
+#include <assert.h>
+#include "ws_dri_bufpool.h"
+#include "ws_dri_fencemgr.h"
+#include "ws_dri_bufmgr.h"
+#include "glthread.h"
+
+#define DRI_SLABPOOL_ALLOC_RETRIES 100
+
+struct _DriSlab;
+
+struct _DriSlabBuffer {
+ int isSlabBuffer;
+ drmBO *bo;
+ struct _DriFenceObject *fence;
+ struct _DriSlab *parent;
+ drmMMListHead head;
+ uint32_t mapCount;
+ uint32_t start;
+ uint32_t fenceType;
+ int unFenced;
+ _glthread_Cond event;
+};
+
+struct _DriKernelBO {
+ int fd;
+ drmBO bo;
+ drmMMListHead timeoutHead;
+ drmMMListHead head;
+ struct timeval timeFreed;
+ uint32_t pageAlignment;
+ void *virtual;
+};
+
+struct _DriSlab{
+ drmMMListHead head;
+ drmMMListHead freeBuffers;
+ uint32_t numBuffers;
+ uint32_t numFree;
+ struct _DriSlabBuffer *buffers;
+ struct _DriSlabSizeHeader *header;
+ struct _DriKernelBO *kbo;
+};
+
+
+struct _DriSlabSizeHeader {
+ drmMMListHead slabs;
+ drmMMListHead freeSlabs;
+ drmMMListHead delayedBuffers;
+ uint32_t numDelayed;
+ struct _DriSlabPool *slabPool;
+ uint32_t bufSize;
+ _glthread_Mutex mutex;
+};
+
+struct _DriFreeSlabManager {
+ struct timeval slabTimeout;
+ struct timeval checkInterval;
+ struct timeval nextCheck;
+ drmMMListHead timeoutList;
+ drmMMListHead unCached;
+ drmMMListHead cached;
+ _glthread_Mutex mutex;
+};
+
+
+struct _DriSlabPool {
+
+ /*
+ * The data of this structure remains constant after
+ * initialization and thus needs no mutex protection.
+ */
+
+ struct _DriFreeSlabManager *fMan;
+ uint64_t proposedFlags;
+ uint64_t validMask;
+ uint32_t *bucketSizes;
+ uint32_t numBuckets;
+ uint32_t pageSize;
+ int fd;
+ int pageAlignment;
+ int maxSlabSize;
+ int desiredNumBuffers;
+ struct _DriSlabSizeHeader *headers;
+};
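+
+/*
+ * Rough picture of the structures above:
+ *
+ *   _DriSlabPool
+ *      headers[numBuckets]     one _DriSlabSizeHeader per power-of-two
+ *                              bucket size
+ *         slabs / freeSlabs    lists of _DriSlab
+ *            buffers[]         _DriSlabBuffer sub-allocations, all backed
+ *                              by the slab's single _DriKernelBO (a drmBO)
+ *   _DriFreeSlabManager        caches idle _DriKernelBOs for reuse and
+ *                              frees them after a timeout
+ */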
+
+/*
+ * FIXME: Perhaps arrange timeout slabs in size buckets for fast
+ * retrieval?
+ */
+
+
+static inline int
+driTimeAfterEq(struct timeval *arg1, struct timeval *arg2)
+{
+ return ((arg1->tv_sec > arg2->tv_sec) ||
+ ((arg1->tv_sec == arg2->tv_sec) &&
+ (arg1->tv_usec >= arg2->tv_usec)));
+}
+
+static inline void
+driTimeAdd(struct timeval *arg, struct timeval *add)
+{
+ unsigned int sec;
+
+ arg->tv_sec += add->tv_sec;
+ arg->tv_usec += add->tv_usec;
+ sec = arg->tv_usec / 1000000;
+ arg->tv_sec += sec;
+ arg->tv_usec -= sec*1000000;
+}
+
+static void
+driFreeKernelBO(struct _DriKernelBO *kbo)
+{
+ if (!kbo)
+ return;
+
+ (void) drmBOUnreference(kbo->fd, &kbo->bo);
+ free(kbo);
+}
+
+
+static void
+driFreeTimeoutKBOsLocked(struct _DriFreeSlabManager *fMan,
+ struct timeval *time)
+{
+ drmMMListHead *list, *next;
+ struct _DriKernelBO *kbo;
+
+ if (!driTimeAfterEq(time, &fMan->nextCheck))
+ return;
+
+ for (list = fMan->timeoutList.next, next = list->next;
+ list != &fMan->timeoutList;
+ list = next, next = list->next) {
+
+ kbo = DRMLISTENTRY(struct _DriKernelBO, list, timeoutHead);
+
+ if (!driTimeAfterEq(time, &kbo->timeFreed))
+ break;
+
+ DRMLISTDELINIT(&kbo->timeoutHead);
+ DRMLISTDELINIT(&kbo->head);
+ driFreeKernelBO(kbo);
+ }
+
+ fMan->nextCheck = *time;
+ driTimeAdd(&fMan->nextCheck, &fMan->checkInterval);
+}
+
+
+/*
+ * Add a _DriKernelBO to the free slab manager.
+ * This means that it is available for reuse, but if it's not
+ * reused in a while, it will be freed.
+ */
+
+static void
+driSetKernelBOFree(struct _DriFreeSlabManager *fMan,
+ struct _DriKernelBO *kbo)
+{
+ struct timeval time;
+
+ _glthread_LOCK_MUTEX(fMan->mutex);
+ gettimeofday(&time, NULL);
+ driTimeAdd(&time, &fMan->slabTimeout);
+
+ kbo->timeFreed = time;
+
+ if (kbo->bo.flags & DRM_BO_FLAG_CACHED)
+ DRMLISTADD(&kbo->head, &fMan->cached);
+ else
+ DRMLISTADD(&kbo->head, &fMan->unCached);
+
+ DRMLISTADDTAIL(&kbo->timeoutHead, &fMan->timeoutList);
+ driFreeTimeoutKBOsLocked(fMan, &time);
+
+ _glthread_UNLOCK_MUTEX(fMan->mutex);
+}
+
+/*
+ * Get a _DriKernelBO for us to use as storage for a slab.
+ *
+ */
+
+static struct _DriKernelBO *
+driAllocKernelBO(struct _DriSlabSizeHeader *header)
+{
+ struct _DriSlabPool *slabPool = header->slabPool;
+ struct _DriFreeSlabManager *fMan = slabPool->fMan;
+ drmMMListHead *list, *next, *head;
+ uint32_t size = header->bufSize * slabPool->desiredNumBuffers;
+ struct _DriKernelBO *kbo;
+ struct _DriKernelBO *kboTmp;
+ int ret;
+
+ /*
+ * FIXME: We should perhaps allow some variation in slab size in order
+ * to efficiently reuse slabs.
+ */
+
+ size = (size <= slabPool->maxSlabSize) ? size : slabPool->maxSlabSize;
+ size = (size + slabPool->pageSize - 1) & ~(slabPool->pageSize - 1);
+ _glthread_LOCK_MUTEX(fMan->mutex);
+
+ kbo = NULL;
+
+ retry:
+ head = (slabPool->proposedFlags & DRM_BO_FLAG_CACHED) ?
+ &fMan->cached : &fMan->unCached;
+
+ for (list = head->next, next = list->next;
+ list != head;
+ list = next, next = list->next) {
+
+ kboTmp = DRMLISTENTRY(struct _DriKernelBO, list, head);
+
+ if ((kboTmp->bo.size == size) &&
+ (slabPool->pageAlignment == 0 ||
+ (kboTmp->pageAlignment % slabPool->pageAlignment) == 0)) {
+
+ if (!kbo)
+ kbo = kboTmp;
+
+ if ((kbo->bo.proposedFlags ^ slabPool->proposedFlags) == 0)
+ break;
+
+ }
+ }
+
+ if (kbo) {
+ DRMLISTDELINIT(&kbo->head);
+ DRMLISTDELINIT(&kbo->timeoutHead);
+ }
+
+ _glthread_UNLOCK_MUTEX(fMan->mutex);
+
+ if (kbo) {
+ uint64_t new_mask = kbo->bo.proposedFlags ^ slabPool->proposedFlags;
+
+ ret = 0;
+ if (new_mask) {
+ ret = drmBOSetStatus(kbo->fd, &kbo->bo, slabPool->proposedFlags,
+ new_mask, DRM_BO_HINT_DONT_FENCE, 0, 0);
+ }
+ if (ret == 0)
+ return kbo;
+
+ driFreeKernelBO(kbo);
+ kbo = NULL;
+ goto retry;
+ }
+
+ kbo = calloc(1, sizeof(struct _DriKernelBO));
+ if (!kbo)
+ return NULL;
+
+ kbo->fd = slabPool->fd;
+ DRMINITLISTHEAD(&kbo->head);
+ DRMINITLISTHEAD(&kbo->timeoutHead);
+ ret = drmBOCreate(kbo->fd, size, slabPool->pageAlignment, NULL,
+ slabPool->proposedFlags,
+ DRM_BO_HINT_DONT_FENCE, &kbo->bo);
+ if (ret)
+ goto out_err0;
+
+ ret = drmBOMap(kbo->fd, &kbo->bo,
+ DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
+ 0, &kbo->virtual);
+
+ if (ret)
+ goto out_err1;
+
+ ret = drmBOUnmap(kbo->fd, &kbo->bo);
+ if (ret)
+ goto out_err1;
+
+ return kbo;
+
+ out_err1:
+ drmBOUnreference(kbo->fd, &kbo->bo);
+ out_err0:
+ free(kbo);
+ return NULL;
+}
+
+
+static int
+driAllocSlab(struct _DriSlabSizeHeader *header)
+{
+ struct _DriSlab *slab;
+ struct _DriSlabBuffer *buf;
+ uint32_t numBuffers;
+ int ret;
+ int i;
+
+ slab = calloc(1, sizeof(*slab));
+ if (!slab)
+ return -ENOMEM;
+
+ slab->kbo = driAllocKernelBO(header);
+ if (!slab->kbo) {
+ ret = -ENOMEM;
+ goto out_err0;
+ }
+
+ numBuffers = slab->kbo->bo.size / header->bufSize;
+
+ slab->buffers = calloc(numBuffers, sizeof(*slab->buffers));
+ if (!slab->buffers) {
+ ret = -ENOMEM;
+ goto out_err1;
+ }
+
+ DRMINITLISTHEAD(&slab->head);
+ DRMINITLISTHEAD(&slab->freeBuffers);
+ slab->numBuffers = numBuffers;
+ slab->numFree = 0;
+ slab->header = header;
+
+ buf = slab->buffers;
+ for (i=0; i < numBuffers; ++i) {
+ buf->parent = slab;
+ buf->start = i* header->bufSize;
+ buf->mapCount = 0;
+ buf->isSlabBuffer = 1;
+ _glthread_INIT_COND(buf->event);
+ DRMLISTADDTAIL(&buf->head, &slab->freeBuffers);
+ slab->numFree++;
+ buf++;
+ }
+
+ DRMLISTADDTAIL(&slab->head, &header->slabs);
+
+ return 0;
+
+ out_err1:
+ driSetKernelBOFree(header->slabPool->fMan, slab->kbo);
+ free(slab->buffers);
+ out_err0:
+ free(slab);
+ return ret;
+}
+
+/*
+ * Delete a buffer from the slab header delayed list and put
+ * it on the slab free list.
+ */
+
+static void
+driSlabFreeBufferLocked(struct _DriSlabBuffer *buf)
+{
+ struct _DriSlab *slab = buf->parent;
+ struct _DriSlabSizeHeader *header = slab->header;
+ drmMMListHead *list = &buf->head;
+
+ DRMLISTDEL(list);
+ DRMLISTADDTAIL(list, &slab->freeBuffers);
+ slab->numFree++;
+
+ if (slab->head.next == &slab->head)
+ DRMLISTADDTAIL(&slab->head, &header->slabs);
+
+ if (slab->numFree == slab->numBuffers) {
+ list = &slab->head;
+ DRMLISTDEL(list);
+ DRMLISTADDTAIL(list, &header->freeSlabs);
+ }
+
+ if (header->slabs.next == &header->slabs ||
+ slab->numFree != slab->numBuffers) {
+
+ drmMMListHead *next;
+ struct _DriFreeSlabManager *fMan = header->slabPool->fMan;
+
+ for (list = header->freeSlabs.next, next = list->next;
+ list != &header->freeSlabs;
+ list = next, next = list->next) {
+
+ slab = DRMLISTENTRY(struct _DriSlab, list, head);
+
+ DRMLISTDELINIT(list);
+ driSetKernelBOFree(fMan, slab->kbo);
+ free(slab->buffers);
+ free(slab);
+ }
+ }
+}
+
+static void
+driSlabCheckFreeLocked(struct _DriSlabSizeHeader *header, int wait)
+{
+ drmMMListHead *list, *prev, *first;
+ struct _DriSlabBuffer *buf;
+ struct _DriSlab *slab;
+ int firstWasSignaled = 1;
+ int signaled;
+ int i;
+ int ret;
+
+ /*
+ * Rerun the freeing test if the youngest tested buffer
+ * was signaled, since there might be more idle buffers
+ * in the delay list.
+ */
+
+ while (firstWasSignaled) {
+ firstWasSignaled = 0;
+ signaled = 0;
+ first = header->delayedBuffers.next;
+
+ /* Only examine the oldest 1/3 of delayed buffers:
+ */
+ if (header->numDelayed > 3) {
+ for (i = 0; i < header->numDelayed; i += 3) {
+ first = first->next;
+ }
+ }
+
+ for (list = first, prev = list->prev;
+ list != &header->delayedBuffers;
+ list = prev, prev = list->prev) {
+ buf = DRMLISTENTRY(struct _DriSlabBuffer, list, head);
+ slab = buf->parent;
+
+ if (!signaled) {
+ if (wait) {
+ ret = driFenceFinish(buf->fence, buf->fenceType, 0);
+ if (ret)
+ break;
+ signaled = 1;
+ wait = 0;
+ } else {
+ signaled = driFenceSignaled(buf->fence, buf->fenceType);
+ }
+ if (signaled) {
+ if (list == first)
+ firstWasSignaled = 1;
+ driFenceUnReference(&buf->fence);
+ header->numDelayed--;
+ driSlabFreeBufferLocked(buf);
+ }
+ } else if (driFenceSignaledCached(buf->fence, buf->fenceType)) {
+ driFenceUnReference(&buf->fence);
+ header->numDelayed--;
+ driSlabFreeBufferLocked(buf);
+ }
+ }
+ }
+}
+
+
+static struct _DriSlabBuffer *
+driSlabAllocBuffer(struct _DriSlabSizeHeader *header)
+{
+ struct _DriSlabBuffer *buf;
+ struct _DriSlab *slab;
+ drmMMListHead *list;
+ int count = DRI_SLABPOOL_ALLOC_RETRIES;
+
+ _glthread_LOCK_MUTEX(header->mutex);
+ while(header->slabs.next == &header->slabs && count > 0) {
+ driSlabCheckFreeLocked(header, 0);
+ if (header->slabs.next != &header->slabs)
+ break;
+
+ _glthread_UNLOCK_MUTEX(header->mutex);
+ if (count != DRI_SLABPOOL_ALLOC_RETRIES)
+ usleep(1);
+ _glthread_LOCK_MUTEX(header->mutex);
+ (void) driAllocSlab(header);
+ count--;
+ }
+
+ list = header->slabs.next;
+ if (list == &header->slabs) {
+ _glthread_UNLOCK_MUTEX(header->mutex);
+ return NULL;
+ }
+ slab = DRMLISTENTRY(struct _DriSlab, list, head);
+ if (--slab->numFree == 0)
+ DRMLISTDELINIT(list);
+
+ list = slab->freeBuffers.next;
+ DRMLISTDELINIT(list);
+
+ _glthread_UNLOCK_MUTEX(header->mutex);
+ buf = DRMLISTENTRY(struct _DriSlabBuffer, list, head);
+ return buf;
+}
+
+static void *
+pool_create(struct _DriBufferPool *driPool, unsigned long size,
+ uint64_t flags, unsigned hint, unsigned alignment)
+{
+ struct _DriSlabPool *pool = (struct _DriSlabPool *) driPool->data;
+ struct _DriSlabSizeHeader *header;
+ struct _DriSlabBuffer *buf;
+ void *dummy;
+ int i;
+ int ret;
+
+ /*
+ * FIXME: Check for compatibility.
+ */
+
+ header = pool->headers;
+ for (i=0; i<pool->numBuckets; ++i) {
+ if (header->bufSize >= size)
+ break;
+ header++;
+ }
+
+ if (i < pool->numBuckets)
+ return driSlabAllocBuffer(header);
+
+
+ /*
+ * Fall back to allocating a buffer object directly from DRM
+ * and wrapping it in a driBO structure.
+ */
+
+
+ buf = calloc(1, sizeof(*buf));
+
+ if (!buf)
+ return NULL;
+
+ buf->bo = calloc(1, sizeof(*buf->bo));
+ if (!buf->bo)
+ goto out_err0;
+
+ if (alignment) {
+ if ((alignment < pool->pageSize) && (pool->pageSize % alignment))
+ goto out_err1;
+ if ((alignment > pool->pageSize) && (alignment % pool->pageSize))
+ goto out_err1;
+ }
+
+ ret = drmBOCreate(pool->fd, size, alignment / pool->pageSize, NULL,
+ flags, hint, buf->bo);
+ if (ret)
+ goto out_err1;
+
+ ret = drmBOMap(pool->fd, buf->bo, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
+ 0, &dummy);
+ if (ret)
+ goto out_err2;
+
+ ret = drmBOUnmap(pool->fd, buf->bo);
+ if (ret)
+ goto out_err2;
+
+ return buf;
+ out_err2:
+ drmBOUnreference(pool->fd, buf->bo);
+ out_err1:
+ free(buf->bo);
+ out_err0:
+ free(buf);
+ return NULL;
+}
+
+static int
+pool_destroy(struct _DriBufferPool *driPool, void *private)
+{
+ struct _DriSlabBuffer *buf =
+ (struct _DriSlabBuffer *) private;
+ struct _DriSlab *slab;
+ struct _DriSlabSizeHeader *header;
+
+ if (!buf->isSlabBuffer) {
+ struct _DriSlabPool *pool = (struct _DriSlabPool *) driPool->data;
+ int ret;
+
+ ret = drmBOUnreference(pool->fd, buf->bo);
+ free(buf->bo);
+ free(buf);
+ return ret;
+ }
+
+ slab = buf->parent;
+ header = slab->header;
+
+ _glthread_LOCK_MUTEX(header->mutex);
+ buf->unFenced = 0;
+ buf->mapCount = 0;
+
+ if (buf->fence && !driFenceSignaledCached(buf->fence, buf->fenceType)) {
+ DRMLISTADDTAIL(&buf->head, &header->delayedBuffers);
+ header->numDelayed++;
+ } else {
+ if (buf->fence)
+ driFenceUnReference(&buf->fence);
+ driSlabFreeBufferLocked(buf);
+ }
+
+ _glthread_UNLOCK_MUTEX(header->mutex);
+ return 0;
+}
+
+static int
+pool_waitIdle(struct _DriBufferPool *driPool, void *private,
+ _glthread_Mutex *mutex, int lazy)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+
+ while(buf->unFenced)
+ _glthread_COND_WAIT(buf->event, *mutex);
+
+ if (!buf->fence)
+ return 0;
+
+ driFenceFinish(buf->fence, buf->fenceType, lazy);
+ driFenceUnReference(&buf->fence);
+
+ return 0;
+}
+
+static int
+pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
+ int hint, _glthread_Mutex *mutex, void **virtual)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+ int busy;
+
+ if (buf->isSlabBuffer)
+ busy = buf->unFenced || (buf->fence && !driFenceSignaledCached(buf->fence, buf->fenceType));
+ else
+ busy = buf->fence && !driFenceSignaled(buf->fence, buf->fenceType);
+
+
+ if (busy) {
+ if (hint & DRM_BO_HINT_DONT_BLOCK)
+ return -EBUSY;
+ else {
+ (void) pool_waitIdle(pool, private, mutex, 0);
+ }
+ }
+
+ ++buf->mapCount;
+ *virtual = (buf->isSlabBuffer) ?
+ (void *) ((uint8_t *) buf->parent->kbo->virtual + buf->start) :
+ (void *) buf->bo->virtual;
+
+ return 0;
+}
+
+static int
+pool_unmap(struct _DriBufferPool *pool, void *private)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+
+ --buf->mapCount;
+ if (buf->mapCount == 0 && buf->isSlabBuffer)
+ _glthread_COND_BROADCAST(buf->event);
+
+ return 0;
+}
+
+static unsigned long
+pool_offset(struct _DriBufferPool *pool, void *private)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+ struct _DriSlab *slab;
+ struct _DriSlabSizeHeader *header;
+
+ if (!buf->isSlabBuffer) {
+ assert(buf->bo->proposedFlags & DRM_BO_FLAG_NO_MOVE);
+ return buf->bo->offset;
+ }
+
+ slab = buf->parent;
+ header = slab->header;
+
+ (void) header;
+ assert(header->slabPool->proposedFlags & DRM_BO_FLAG_NO_MOVE);
+ return slab->kbo->bo.offset + buf->start;
+}
+
+static unsigned long
+pool_poolOffset(struct _DriBufferPool *pool, void *private)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+
+ return buf->start;
+}
+
+static uint64_t
+pool_flags(struct _DriBufferPool *pool, void *private)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+
+ if (!buf->isSlabBuffer)
+ return buf->bo->flags;
+
+ return buf->parent->kbo->bo.flags;
+}
+
+static unsigned long
+pool_size(struct _DriBufferPool *pool, void *private)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+ if (!buf->isSlabBuffer)
+ return buf->bo->size;
+
+ return buf->parent->header->bufSize;
+}
+
+static int
+pool_fence(struct _DriBufferPool *pool, void *private,
+ struct _DriFenceObject *fence)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+ drmBO *bo;
+
+ if (buf->fence)
+ driFenceUnReference(&buf->fence);
+
+ buf->fence = driFenceReference(fence);
+ bo = (buf->isSlabBuffer) ?
+ &buf->parent->kbo->bo:
+ buf->bo;
+ buf->fenceType = bo->fenceFlags;
+
+ buf->unFenced = 0;
+ _glthread_COND_BROADCAST(buf->event);
+
+ return 0;
+}
+
+static drmBO *
+pool_kernel(struct _DriBufferPool *pool, void *private)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+
+ return (buf->isSlabBuffer) ? &buf->parent->kbo->bo : buf->bo;
+}
+
+static int
+pool_validate(struct _DriBufferPool *pool, void *private,
+ _glthread_Mutex *mutex)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+
+ if (!buf->isSlabBuffer)
+ return 0;
+
+ while(buf->mapCount != 0)
+ _glthread_COND_WAIT(buf->event, *mutex);
+
+ buf->unFenced = 1;
+ return 0;
+}
+
+
+struct _DriFreeSlabManager *
+driInitFreeSlabManager(uint32_t checkIntervalMsec, uint32_t slabTimeoutMsec)
+{
+ struct _DriFreeSlabManager *tmp;
+
+ tmp = calloc(1, sizeof(*tmp));
+ if (!tmp)
+ return NULL;
+
+ _glthread_INIT_MUTEX(tmp->mutex);
+ _glthread_LOCK_MUTEX(tmp->mutex);
+ tmp->slabTimeout.tv_usec = slabTimeoutMsec*1000;
+ tmp->slabTimeout.tv_sec = tmp->slabTimeout.tv_usec / 1000000;
+ tmp->slabTimeout.tv_usec -= tmp->slabTimeout.tv_sec*1000000;
+
+ tmp->checkInterval.tv_usec = checkIntervalMsec*1000;
+ tmp->checkInterval.tv_sec = tmp->checkInterval.tv_usec / 1000000;
+ tmp->checkInterval.tv_usec -= tmp->checkInterval.tv_sec*1000000;
+
+ gettimeofday(&tmp->nextCheck, NULL);
+ driTimeAdd(&tmp->nextCheck, &tmp->checkInterval);
+ DRMINITLISTHEAD(&tmp->timeoutList);
+ DRMINITLISTHEAD(&tmp->unCached);
+ DRMINITLISTHEAD(&tmp->cached);
+ _glthread_UNLOCK_MUTEX(tmp->mutex);
+
+ return tmp;
+}
+
+void
+driFinishFreeSlabManager(struct _DriFreeSlabManager *fMan)
+{
+ struct timeval time;
+
+ time = fMan->nextCheck;
+ driTimeAdd(&time, &fMan->checkInterval);
+
+ _glthread_LOCK_MUTEX(fMan->mutex);
+ driFreeTimeoutKBOsLocked(fMan, &time);
+ _glthread_UNLOCK_MUTEX(fMan->mutex);
+
+ assert(fMan->timeoutList.next == &fMan->timeoutList);
+ assert(fMan->unCached.next == &fMan->unCached);
+ assert(fMan->cached.next == &fMan->cached);
+
+ free(fMan);
+}
+
+static void
+driInitSizeHeader(struct _DriSlabPool *pool, uint32_t size,
+ struct _DriSlabSizeHeader *header)
+{
+ _glthread_INIT_MUTEX(header->mutex);
+ _glthread_LOCK_MUTEX(header->mutex);
+
+ DRMINITLISTHEAD(&header->slabs);
+ DRMINITLISTHEAD(&header->freeSlabs);
+ DRMINITLISTHEAD(&header->delayedBuffers);
+
+ header->numDelayed = 0;
+ header->slabPool = pool;
+ header->bufSize = size;
+
+ _glthread_UNLOCK_MUTEX(header->mutex);
+}
+
+static void
+driFinishSizeHeader(struct _DriSlabSizeHeader *header)
+{
+ drmMMListHead *list, *next;
+ struct _DriSlabBuffer *buf;
+
+ _glthread_LOCK_MUTEX(header->mutex);
+ for (list = header->delayedBuffers.next, next = list->next;
+ list != &header->delayedBuffers;
+ list = next, next = list->next) {
+
+ buf = DRMLISTENTRY(struct _DriSlabBuffer, list , head);
+ if (buf->fence) {
+ (void) driFenceFinish(buf->fence, buf->fenceType, 0);
+ driFenceUnReference(&buf->fence);
+ }
+ header->numDelayed--;
+ driSlabFreeBufferLocked(buf);
+ }
+ _glthread_UNLOCK_MUTEX(header->mutex);
+}
+
+static void
+pool_takedown(struct _DriBufferPool *driPool)
+{
+ struct _DriSlabPool *pool = driPool->data;
+ int i;
+
+ for (i=0; i<pool->numBuckets; ++i) {
+ driFinishSizeHeader(&pool->headers[i]);
+ }
+
+ free(pool->headers);
+ free(pool->bucketSizes);
+ free(pool);
+ free(driPool);
+}
+
+struct _DriBufferPool *
+driSlabPoolInit(int fd, uint64_t flags,
+ uint64_t validMask,
+ uint32_t smallestSize,
+ uint32_t numSizes,
+ uint32_t desiredNumBuffers,
+ uint32_t maxSlabSize,
+ uint32_t pageAlignment,
+ struct _DriFreeSlabManager *fMan)
+{
+ struct _DriBufferPool *driPool;
+ struct _DriSlabPool *pool;
+ uint32_t i;
+
+ driPool = calloc(1, sizeof(*driPool));
+ if (!driPool)
+ return NULL;
+
+ pool = calloc(1, sizeof(*pool));
+ if (!pool)
+ goto out_err0;
+
+ pool->bucketSizes = calloc(numSizes, sizeof(*pool->bucketSizes));
+ if (!pool->bucketSizes)
+ goto out_err1;
+
+ pool->headers = calloc(numSizes, sizeof(*pool->headers));
+ if (!pool->headers)
+ goto out_err2;
+
+ pool->fMan = fMan;
+ pool->proposedFlags = flags;
+ pool->validMask = validMask;
+ pool->numBuckets = numSizes;
+ pool->pageSize = getpagesize();
+ pool->fd = fd;
+ pool->pageAlignment = pageAlignment;
+ pool->maxSlabSize = maxSlabSize;
+ pool->desiredNumBuffers = desiredNumBuffers;
+
+ for (i=0; i<pool->numBuckets; ++i) {
+ pool->bucketSizes[i] = (smallestSize << i);
+ driInitSizeHeader(pool, pool->bucketSizes[i],
+ &pool->headers[i]);
+ }
+
+ driPool->data = (void *) pool;
+ driPool->map = &pool_map;
+ driPool->unmap = &pool_unmap;
+ driPool->destroy = &pool_destroy;
+ driPool->offset = &pool_offset;
+ driPool->poolOffset = &pool_poolOffset;
+ driPool->flags = &pool_flags;
+ driPool->size = &pool_size;
+ driPool->create = &pool_create;
+ driPool->fence = &pool_fence;
+ driPool->kernel = &pool_kernel;
+ driPool->validate = &pool_validate;
+ driPool->waitIdle = &pool_waitIdle;
+ driPool->takeDown = &pool_takedown;
+
+ return driPool;
+
+ out_err2:
+ free(pool->bucketSizes);
+ out_err1:
+ free(pool);
+ out_err0:
+ free(driPool);
+
+ return NULL;
+}
-I$(TOP)/src/gallium/include \
-I$(TOP)/src/gallium/auxiliary \
-I$(TOP)/src/gallium/drivers \
+ -I$(TOP)/src/gallium/winsys/common \
-I$(TOP)/src/mesa \
-I$(TOP)/src/mesa/main \
-I$(TOP)/src/mesa/glapi \
LIBNAME = i915_dri.so
-MINIGLX_SOURCES = server/intel_dri.c
-
PIPE_DRIVERS = \
$(TOP)/src/gallium/drivers/softpipe/libsoftpipe.a \
- $(TOP)/src/gallium/drivers/i915simple/libi915simple.a
+ $(TOP)/src/gallium/drivers/i915simple/libi915simple.a \
+ $(TOP)/src/gallium/winsys/common/intel_drm/libinteldrm.a
DRIVER_SOURCES = \
- intel_winsys_pipe.c \
intel_winsys_softpipe.c \
- intel_winsys_i915.c \
- intel_batchbuffer.c \
intel_swapbuffers.c \
intel_context.c \
intel_lock.c \
- intel_screen.c \
- ws_dri_bufmgr.c \
- ws_dri_drmpool.c \
- ws_dri_fencemgr.c \
- ws_dri_mallocpool.c \
- ws_dri_slabpool.c
+ intel_screen.c
C_SOURCES = \
$(COMMON_GALLIUM_SOURCES) \
include ../Makefile.template
-intel_tex_layout.o: $(TOP)/src/mesa/drivers/dri/intel/intel_tex_layout.c
+#intel_tex_layout.o: $(TOP)/src/mesa/drivers/dri/intel/intel_tex_layout.c
symlinks:
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#include "intel_batchbuffer.h"
-#include "intel_context.h"
-#include <errno.h>
-
-#if 0
-static void
-intel_dump_batchbuffer(GLuint offset, GLuint * ptr, GLuint count)
-{
- int i;
- fprintf(stderr, "\n\n\nSTART BATCH (%d dwords):\n", count / 4);
- for (i = 0; i < count / 4; i += 4)
- fprintf(stderr, "0x%x:\t0x%08x 0x%08x 0x%08x 0x%08x\n",
- offset + i * 4, ptr[i], ptr[i + 1], ptr[i + 2], ptr[i + 3]);
- fprintf(stderr, "END BATCH\n\n\n");
-}
-#endif
-
-static void
-intel_realloc_relocs(struct intel_batchbuffer *batch, int num_relocs)
-{
- unsigned long size = num_relocs * I915_RELOC0_STRIDE + I915_RELOC_HEADER;
-
- size *= sizeof(uint32_t);
- batch->reloc = realloc(batch->reloc, size);
- batch->reloc_size = num_relocs;
-}
-
-
-void
-intel_batchbuffer_reset(struct intel_batchbuffer *batch)
-{
- /*
- * Get a new, free batchbuffer.
- */
- drmBO *bo;
- struct drm_bo_info_req *req;
-
- driBOUnrefUserList(batch->list);
- driBOResetList(batch->list);
-
- /* base.size is the size available to the i915simple driver */
- batch->base.size = batch->intel->intelScreen->max_batch_size - BATCH_RESERVED;
- batch->base.actual_size = batch->intel->intelScreen->max_batch_size;
- driBOData(batch->buffer, batch->base.actual_size, NULL, NULL, 0);
-
- /*
- * Add the batchbuffer to the validate list.
- */
-
- driBOAddListItem(batch->list, batch->buffer,
- DRM_BO_FLAG_EXE | DRM_BO_FLAG_MEM_TT,
- DRM_BO_FLAG_EXE | DRM_BO_MASK_MEM,
- &batch->dest_location, &batch->node);
-
- req = &batch->node->bo_arg.d.req.bo_req;
-
- /*
- * Set up information needed for us to make relocations
- * relative to the underlying drm buffer objects.
- */
-
- driReadLockKernelBO();
- bo = driBOKernel(batch->buffer);
- req->presumed_offset = (uint64_t) bo->offset;
- req->hint = DRM_BO_HINT_PRESUMED_OFFSET;
- batch->drmBOVirtual = (uint8_t *) bo->virtual;
- driReadUnlockKernelBO();
-
- /*
- * Adjust the relocation buffer size.
- */
-
- if (batch->reloc_size > INTEL_MAX_RELOCS ||
- batch->reloc == NULL)
- intel_realloc_relocs(batch, INTEL_DEFAULT_RELOCS);
-
- assert(batch->reloc != NULL);
- batch->reloc[0] = 0; /* No relocs yet. */
- batch->reloc[1] = 1; /* Reloc type 1 */
- batch->reloc[2] = 0; /* Only a single relocation list. */
- batch->reloc[3] = 0; /* Only a single relocation list. */
-
- batch->base.map = driBOMap(batch->buffer, DRM_BO_FLAG_WRITE, 0);
- batch->poolOffset = driBOPoolOffset(batch->buffer);
- batch->base.ptr = batch->base.map;
- batch->dirty_state = ~0;
- batch->nr_relocs = 0;
- batch->flags = 0;
- batch->id = 0;//batch->intel->intelScreen->batch_id++;
-}
-
-/*======================================================================
- * Public functions
- */
-struct intel_batchbuffer *
-intel_batchbuffer_alloc(struct intel_context *intel)
-{
- struct intel_batchbuffer *batch = calloc(sizeof(*batch), 1);
-
- batch->intel = intel;
-
- driGenBuffers(intel->intelScreen->batchPool, "batchbuffer", 1,
- &batch->buffer, 4096,
- DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE, 0);
- batch->last_fence = NULL;
- batch->list = driBOCreateList(20);
- batch->reloc = NULL;
- intel_batchbuffer_reset(batch);
- return batch;
-}
-
-void
-intel_batchbuffer_free(struct intel_batchbuffer *batch)
-{
- if (batch->last_fence) {
- driFenceFinish(batch->last_fence,
- DRM_FENCE_TYPE_EXE, GL_FALSE);
- driFenceUnReference(&batch->last_fence);
- }
- if (batch->base.map) {
- driBOUnmap(batch->buffer);
- batch->base.map = NULL;
- }
- driBOUnReference(batch->buffer);
- driBOFreeList(batch->list);
- if (batch->reloc)
- free(batch->reloc);
- batch->buffer = NULL;
- free(batch);
-}
-
-void
-intel_offset_relocation(struct intel_batchbuffer *batch,
- unsigned pre_add,
- struct _DriBufferObject *driBO,
- uint64_t val_flags,
- uint64_t val_mask)
-{
- int itemLoc;
- struct _drmBONode *node;
- uint32_t *reloc;
- struct drm_bo_info_req *req;
-
- driBOAddListItem(batch->list, driBO, val_flags, val_mask,
- &itemLoc, &node);
- req = &node->bo_arg.d.req.bo_req;
-
- if (!(req->hint & DRM_BO_HINT_PRESUMED_OFFSET)) {
-
- /*
- * Stop other threads from tampering with the underlying
- * drmBO while we're reading its offset.
- */
-
- driReadLockKernelBO();
- req->presumed_offset = (uint64_t) driBOKernel(driBO)->offset;
- driReadUnlockKernelBO();
- req->hint = DRM_BO_HINT_PRESUMED_OFFSET;
- }
-
- pre_add += driBOPoolOffset(driBO);
-
- if (batch->nr_relocs == batch->reloc_size)
- intel_realloc_relocs(batch, batch->reloc_size * 2);
-
- reloc = batch->reloc +
- (I915_RELOC_HEADER + batch->nr_relocs * I915_RELOC0_STRIDE);
-
- reloc[0] = ((uint8_t *)batch->base.ptr - batch->drmBOVirtual);
- intel_batchbuffer_emit_dword(batch, req->presumed_offset + pre_add);
- reloc[1] = pre_add;
- reloc[2] = itemLoc;
- reloc[3] = batch->dest_location;
- batch->nr_relocs++;
-}
-
-static void
-i915_drm_copy_reply(const struct drm_bo_info_rep * rep, drmBO * buf)
-{
- buf->handle = rep->handle;
- buf->flags = rep->flags;
- buf->size = rep->size;
- buf->offset = rep->offset;
- buf->mapHandle = rep->arg_handle;
- buf->proposedFlags = rep->proposed_flags;
- buf->start = rep->buffer_start;
- buf->fenceFlags = rep->fence_flags;
- buf->replyFlags = rep->rep_flags;
- buf->pageAlignment = rep->page_alignment;
-}
-
-static int
-i915_execbuf(struct intel_batchbuffer *batch,
- GLuint used,
- GLboolean ignore_cliprects,
- drmBOList *list,
- struct drm_i915_execbuffer *ea)
-{
- struct intel_context *intel = batch->intel;
- drmBONode *node;
- drmMMListHead *l;
- struct drm_i915_op_arg *arg, *first;
- struct drm_bo_op_req *req;
- struct drm_bo_info_rep *rep;
- uint64_t *prevNext = NULL;
- drmBO *buf;
- int ret = 0;
- uint32_t count = 0;
-
- first = NULL;
- for (l = list->list.next; l != &list->list; l = l->next) {
- node = DRMLISTENTRY(drmBONode, l, head);
-
- arg = &node->bo_arg;
- req = &arg->d.req;
-
- if (!first)
- first = arg;
-
- if (prevNext)
- *prevNext = (unsigned long)arg;
-
- prevNext = &arg->next;
- req->bo_req.handle = node->buf->handle;
- req->op = drm_bo_validate;
- req->bo_req.flags = node->arg0;
- req->bo_req.mask = node->arg1;
- req->bo_req.hint |= 0;
- count++;
- }
-
- memset(ea, 0, sizeof(*ea));
- ea->num_buffers = count;
- ea->batch.start = batch->poolOffset;
- ea->batch.used = used;
-#if 0 /* ZZZ JB: no cliprects used */
- ea->batch.cliprects = intel->pClipRects;
- ea->batch.num_cliprects = ignore_cliprects ? 0 : intel->numClipRects;
- ea->batch.DR1 = 0;
- ea->batch.DR4 = 0;((((GLuint) intel->drawX) & 0xffff) |
- (((GLuint) intel->drawY) << 16));
-#else
- ea->batch.cliprects = NULL;
- ea->batch.num_cliprects = 0;
- ea->batch.DR1 = 0;
- ea->batch.DR4 = 0;
-#endif
- ea->fence_arg.flags = DRM_I915_FENCE_FLAG_FLUSHED;
- ea->ops_list = (unsigned long) first;
- first->reloc_ptr = (unsigned long) batch->reloc;
- batch->reloc[0] = batch->nr_relocs;
-
- //return -EFAULT;
- do {
- ret = drmCommandWriteRead(intel->driFd, DRM_I915_EXECBUFFER, ea,
- sizeof(*ea));
- } while (ret == -EAGAIN);
-
- if (ret != 0)
- return ret;
-
- for (l = list->list.next; l != &list->list; l = l->next) {
- node = DRMLISTENTRY(drmBONode, l, head);
- arg = &node->bo_arg;
- rep = &arg->d.rep.bo_info;
-
- if (!arg->handled) {
- return -EFAULT;
- }
- if (arg->d.rep.ret)
- return arg->d.rep.ret;
-
- buf = node->buf;
- i915_drm_copy_reply(rep, buf);
- }
- return 0;
-}
-
-/* TODO: Push this whole function into bufmgr.
- */
-static struct _DriFenceObject *
-do_flush_locked(struct intel_batchbuffer *batch,
- GLuint used,
- GLboolean ignore_cliprects, GLboolean allow_unlock)
-{
- struct intel_context *intel = batch->intel;
- struct _DriFenceObject *fo;
- drmFence fence;
- drmBOList *boList;
- struct drm_i915_execbuffer ea;
- int ret = 0;
-
- driBOValidateUserList(batch->list);
- boList = driGetdrmBOList(batch->list);
-
-#if 0 /* ZZZ JB Always run */
- if (!(intel->numClipRects == 0 && !ignore_cliprects)) {
-#else
- if (1) {
-#endif
- ret = i915_execbuf(batch, used, ignore_cliprects, boList, &ea);
- } else {
- driPutdrmBOList(batch->list);
- fo = NULL;
- goto out;
- }
- driPutdrmBOList(batch->list);
- if (ret)
- abort();
-
- if (ea.fence_arg.error != 0) {
-
- /*
- * The hardware has been idled by the kernel.
- * Don't fence the driBOs.
- */
-
- if (batch->last_fence)
- driFenceUnReference(&batch->last_fence);
-#if 0 /* ZZZ JB: no _mesa_* funcs in gallium */
- _mesa_printf("fence error\n");
-#endif
- batch->last_fence = NULL;
- fo = NULL;
- goto out;
- }
-
- fence.handle = ea.fence_arg.handle;
- fence.fence_class = ea.fence_arg.fence_class;
- fence.type = ea.fence_arg.type;
- fence.flags = ea.fence_arg.flags;
- fence.signaled = ea.fence_arg.signaled;
-
- fo = driBOFenceUserList(batch->intel->intelScreen->mgr, batch->list,
- "SuperFence", &fence);
-
- if (driFenceType(fo) & DRM_I915_FENCE_TYPE_RW) {
- if (batch->last_fence)
- driFenceUnReference(&batch->last_fence);
- /*
- * FIXME: Context last fence??
- */
- batch->last_fence = fo;
- driFenceReference(fo);
- }
- out:
-#if 0 /* ZZZ JB: fix this */
- intel->vtbl.lost_hardware(intel);
-#else
- (void)intel;
-#endif
- return fo;
-}
-
-
-struct _DriFenceObject *
-intel_batchbuffer_flush(struct intel_batchbuffer *batch)
-{
- struct intel_context *intel = batch->intel;
- GLuint used = batch->base.ptr - batch->base.map;
- GLboolean was_locked = intel->locked;
- struct _DriFenceObject *fence;
-
- if (used == 0) {
- driFenceReference(batch->last_fence);
- return batch->last_fence;
- }
-
- /* Add the MI_BATCH_BUFFER_END. Always add an MI_FLUSH - this is a
- * performance drain that we would like to avoid.
- */
-#if 0 /* ZZZ JB: what should we do here? */
- if (used & 4) {
- ((int *) batch->base.ptr)[0] = intel->vtbl.flush_cmd();
- ((int *) batch->base.ptr)[1] = 0;
- ((int *) batch->base.ptr)[2] = MI_BATCH_BUFFER_END;
- used += 12;
- }
- else {
- ((int *) batch->base.ptr)[0] = intel->vtbl.flush_cmd();
- ((int *) batch->base.ptr)[1] = MI_BATCH_BUFFER_END;
- used += 8;
- }
-#else
- if (used & 4) {
- ((int *) batch->base.ptr)[0] = ((0<<29)|(4<<23)); // MI_FLUSH;
- ((int *) batch->base.ptr)[1] = 0;
- ((int *) batch->base.ptr)[2] = (0xA<<23); // MI_BATCH_BUFFER_END;
- used += 12;
- }
- else {
- ((int *) batch->base.ptr)[0] = ((0<<29)|(4<<23)); // MI_FLUSH;
- ((int *) batch->base.ptr)[1] = (0xA<<23); // MI_BATCH_BUFFER_END;
- used += 8;
- }
-#endif
- driBOUnmap(batch->buffer);
- batch->base.ptr = NULL;
- batch->base.map = NULL;
-
- /* TODO: Just pass the relocation list and dma buffer up to the
- * kernel.
- */
- if (!was_locked)
- LOCK_HARDWARE(intel);
-
- fence = do_flush_locked(batch, used, !(batch->flags & INTEL_BATCH_CLIPRECTS),
- GL_FALSE);
-
- if (!was_locked)
- UNLOCK_HARDWARE(intel);
-
- /* Reset the buffer:
- */
- intel_batchbuffer_reset(batch);
- return fence;
-}
-
-void
-intel_batchbuffer_finish(struct intel_batchbuffer *batch)
-{
- struct _DriFenceObject *fence = intel_batchbuffer_flush(batch);
- driFenceFinish(fence, driFenceType(fence), GL_FALSE);
- driFenceUnReference(&fence);
-}
-
-void
-intel_batchbuffer_data(struct intel_batchbuffer *batch,
- const void *data, GLuint bytes, GLuint flags)
-{
- assert((bytes & 3) == 0);
- intel_batchbuffer_require_space(batch, bytes, flags);
- memcpy(batch->base.ptr, data, bytes);
- batch->base.ptr += bytes;
-}
#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H
-#include "mtypes.h"
-#include "ws_dri_bufmgr.h"
-#include "i915simple/i915_batch.h"
+#include "intel_drm/intel_be_batchbuffer.h"
-struct intel_context;
-
-#define BATCH_SZ 16384
-#define BATCH_RESERVED 16
-
-#define INTEL_DEFAULT_RELOCS 100
-#define INTEL_MAX_RELOCS 400
-
-#define INTEL_BATCH_NO_CLIPRECTS 0x1
-#define INTEL_BATCH_CLIPRECTS 0x2
-
-struct intel_batchbuffer
-{
- struct i915_batchbuffer base;
-
- struct intel_context *intel;
-
- struct _DriBufferObject *buffer;
- struct _DriFenceObject *last_fence;
- GLuint flags;
-
- struct _DriBufferList *list;
- GLuint list_count;
-
- uint32_t *reloc;
- GLuint reloc_size;
- GLuint nr_relocs;
-
- GLuint dirty_state;
- GLuint id;
-
- uint32_t poolOffset;
- uint8_t *drmBOVirtual;
- struct _drmBONode *node; /* Validation list node for this buffer */
- int dest_location; /* Validation list sequence for this buffer */
-};
-
-struct intel_batchbuffer *intel_batchbuffer_alloc(struct intel_context
- *intel);
-
-void intel_batchbuffer_free(struct intel_batchbuffer *batch);
-
-
-void intel_batchbuffer_finish(struct intel_batchbuffer *batch);
-
-struct _DriFenceObject *intel_batchbuffer_flush(struct intel_batchbuffer
- *batch);
-
-void intel_batchbuffer_reset(struct intel_batchbuffer *batch);
-
-
-/* Unlike bmBufferData, this currently requires the buffer be mapped.
- * Consider it a convenience function wrapping multiple
- * intel_buffer_dword() calls.
- */
-void intel_batchbuffer_data(struct intel_batchbuffer *batch,
- const void *data, GLuint bytes, GLuint flags);
-
-void intel_batchbuffer_release_space(struct intel_batchbuffer *batch,
- GLuint bytes);
-
-void
-intel_offset_relocation(struct intel_batchbuffer *batch,
- unsigned pre_add,
- struct _DriBufferObject *driBO,
- uint64_t val_flags,
- uint64_t val_mask);
-
-/* Inline functions - might actually be better off with these
- * non-inlined. Certainly better off switching all command packets to
- * be passed as structs rather than dwords, but that's a little bit of
- * work...
- */
-static INLINE GLuint
-intel_batchbuffer_space(struct intel_batchbuffer *batch)
-{
- return (batch->base.size - BATCH_RESERVED) - (batch->base.ptr - batch->base.map);
-}
-
-
-static INLINE void
-intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, GLuint dword)
-{
- assert(batch->base.map);
- assert(intel_batchbuffer_space(batch) >= 4);
- *(GLuint *) (batch->base.ptr) = dword;
- batch->base.ptr += 4;
-}
-
-static INLINE void
-intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
- GLuint sz, GLuint flags)
-{
- struct _DriFenceObject *fence;
-
- assert(sz < batch->base.size - 8);
- if (intel_batchbuffer_space(batch) < sz ||
- (batch->flags != 0 && flags != 0 && batch->flags != flags)) {
- fence = intel_batchbuffer_flush(batch);
- driFenceUnReference(&fence);
- }
-
- batch->flags |= flags;
-}
-
-/* Here are the crusty old macros, to be removed:
+/*
+ * Redefine the BATCH macros in terms of the shared intel_be batchbuffer.
*/
-#undef BATCH_LOCALS
-#define BATCH_LOCALS
#undef BEGIN_BATCH
-#define BEGIN_BATCH(n, flags) do { \
- assert(!intel->prim.flush); \
- intel_batchbuffer_require_space(intel->batch, (n)*4, flags); \
-} while (0)
+#define BEGIN_BATCH(dwords, relocs) \
+ (i915_batchbuffer_check(&intel->base.batch->base, dwords, relocs))
#undef OUT_BATCH
-#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel->batch, d)
+#define OUT_BATCH(d) \
+ i915_batchbuffer_dword(&intel->base.batch->base, d)
#undef OUT_RELOC
-#define OUT_RELOC(buf,flags,mask,delta) do { \
+#define OUT_RELOC(buf,flags,mask,delta) do { \
assert((delta) >= 0); \
- intel_offset_relocation(intel->batch, delta, buf, flags, mask); \
+ intel_be_offset_relocation(intel->base.batch, delta, buf, flags, mask); \
} while (0)
-#undef ADVANCE_BATCH
-#define ADVANCE_BATCH() do { } while(0)
-
-
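+
+/*
+ * A minimal usage sketch (CMD, BR13 and buf are placeholder values, and a
+ * struct intel_context *intel is assumed to be in scope). The redefined
+ * BEGIN_BATCH presumably just checks that the requested dwords and
+ * relocations fit, so callers test its return value instead of relying on
+ * an implicit flush:
+ *
+ *    if (BEGIN_BATCH(3, 1)) {
+ *       OUT_BATCH(CMD);
+ *       OUT_BATCH(BR13);
+ *       OUT_RELOC(buf, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
+ *                 DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE, 0);
+ *    }
+ */
+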
#endif
/**************************************************************************
- *
+ *
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
+ *
**************************************************************************/
#include "intel_screen.h"
#include "intel_context.h"
#include "intel_swapbuffers.h"
-#include "intel_winsys.h"
#include "intel_batchbuffer.h"
+#include "intel_winsys_softpipe.h"
+
+#include "i915simple/i915_screen.h"
#include "state_tracker/st_public.h"
#include "state_tracker/st_context.h"
+/**
+ * Create i915 hardware rendering context.
+ */
+static struct pipe_context *
+intel_create_i915simple(struct intel_context *intel,
+ struct pipe_winsys *winsys)
+{
+ struct pipe_screen *screen;
+
+ /* Create the pipe screen. The winsys and the intel_be context supply
+ * the callbacks i915simple needs to communicate with the window system,
+ * buffer manager, etc.
+ */
+ screen = i915_create_screen(winsys, intel->intelScreen->deviceID);
+
+ /* Create the i915simple context:
+ */
+ return i915_create_context(screen, winsys, &intel->base.base );
+}
+
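+/*
+ * Hardware lock callbacks handed to the shared intel_be context; the
+ * backend presumably invokes these around batchbuffer submission so that
+ * the DRI lock is held while talking to the kernel.
+ */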
+static void
+intel_lock_hardware(struct intel_be_context *context)
+{
+ struct intel_context *intel = (struct intel_context *)context;
+ LOCK_HARDWARE(intel);
+}
+
+static void
+intel_unlock_hardware(struct intel_be_context *context)
+{
+ struct intel_context *intel = (struct intel_context *)context;
+ UNLOCK_HARDWARE(intel);
+}
+
+static boolean
+intel_locked_hardware(struct intel_be_context *context)
+{
+ struct intel_context *intel = (struct intel_context *)context;
+ return intel->locked ? TRUE : FALSE;
+}
+
GLboolean
intelCreateContext(const __GLcontextModes * visual,
__DRIcontextPrivate * driContextPriv,
intel->iw.irq_seq = -1;
intel->irqsEmitted = 0;
- intel->batch = intel_batchbuffer_alloc(intel);
intel->last_swap_fence = NULL;
intel->first_swap_fence = NULL;
#ifdef DEBUG
__intel_debug = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
#endif
+ intel->base.hardware_lock = intel_lock_hardware;
+ intel->base.hardware_unlock = intel_unlock_hardware;
+ intel->base.hardware_locked = intel_locked_hardware;
+
+ intel_be_init_context(&intel->base, &intelScreen->base);
/*
* Pipe-related setup
*/
if (getenv("INTEL_SP")) {
/* use softpipe driver instead of hw */
- pipe = intel_create_softpipe( intel, intelScreen->winsys );
+ pipe = intel_create_softpipe( intel, &intelScreen->base.base );
}
else {
switch (intel->intelScreen->deviceID) {
case PCI_CHIP_Q35_G:
case PCI_CHIP_I915_G:
case PCI_CHIP_I915_GM:
- pipe = intel_create_i915simple( intel, intelScreen->winsys );
+ pipe = intel_create_i915simple( intel, &intelScreen->base.base );
break;
default:
- fprintf(stderr, "Unknown PCIID %x in %s, using software driver\n",
+ fprintf(stderr, "Unknown PCIID %x in %s, using software driver\n",
intel->intelScreen->deviceID, __FUNCTION__);
- pipe = intel_create_softpipe( intel, intelScreen->winsys );
+ pipe = intel_create_softpipe( intel, &intelScreen->base.base );
break;
}
}
if (intel) {
st_finish(intel->st);
- intel_batchbuffer_free(intel->batch);
-
if (intel->last_swap_fence) {
driFenceFinish(intel->last_swap_fence, DRM_FENCE_TYPE_EXE, GL_TRUE);
driFenceUnReference(&intel->last_swap_fence);
intel->intelScreen->dummyContext = NULL;
st_destroy_context(intel->st);
+ intel_be_destroy_context(&intel->base);
free(intel);
}
}
/**************************************************************************
- *
+ *
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
+ *
**************************************************************************/
#ifndef INTEL_CONTEXT_H
#include "intel_screen.h"
#include "i915_drm.h"
+#include "intel_drm/intel_be_context.h"
+
struct pipe_context;
struct intel_context;
*/
struct intel_context
{
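+ /* Must be first: the hardware lock callbacks cast the intel_be_context
+ * pointer straight back to a struct intel_context (see
+ * intel_lock_hardware()).
+ */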
+ struct intel_be_context base;
struct st_context *st;
struct _DriFenceObject *last_swap_fence;
struct _DriFenceObject *first_swap_fence;
- struct intel_batchbuffer *batch;
+// struct intel_batchbuffer *batch;
boolean locked;
char *prevLockFile;
} while(0)
#else
-#define DBG(flag, ...)
+#define DBG(flag, ...)
#endif
/**************************************************************************
- *
+ *
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
+ *
**************************************************************************/
}
-/* Unlock the hardware using the global current context
+/* Unlock the hardware using the global current context
*/
void UNLOCK_HARDWARE( struct intel_context *intel )
{
_glthread_UNLOCK_MUTEX(lockMutex);
DBG(LOCK, "%s - unlocked\n", __progname);
-}
+}
/**************************************************************************
- *
+ *
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
+ *
**************************************************************************/
/**************************************************************************
- *
+ *
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
+ *
**************************************************************************/
#include "utils.h"
#include "intel_context.h"
#include "intel_screen.h"
#include "intel_batchbuffer.h"
-//#include "intel_batchpool.h"
#include "intel_swapbuffers.h"
-#include "intel_winsys.h"
#include "i830_dri.h"
-#include "ws_dri_bufpool.h"
+#include "intel_drm/ws_dri_bufpool.h"
#include "pipe/p_context.h"
#include "state_tracker/st_public.h"
DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_NO_MOVE |
DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0);
- driBOSetStatic(intelScreen->front.buffer,
- intelScreen->front.offset,
- intelScreen->front.pitch * intelScreen->front.height,
+ driBOSetStatic(intelScreen->front.buffer,
+ intelScreen->front.offset,
+ intelScreen->front.pitch * intelScreen->front.height,
intelScreen->front.map, 0);
}
#else
- if (intelScreen->staticPool) {
+ if (intelScreen->base.staticPool) {
if (intelScreen->front.buffer)
driBOUnReference(intelScreen->front.buffer);
- driGenBuffers(intelScreen->staticPool, "front", 1, &intelScreen->front.buffer, 0, 0, 0);
+ driGenBuffers(intelScreen->base.staticPool, "front", 1, &intelScreen->front.buffer, 0, 0, 0);
driBOSetReferenced(intelScreen->front.buffer, sarea->front_bo_handle);
}
#endif
if (intelScreen->havePools)
return GL_TRUE;
-#if 0 /* ZZZ JB fix this */
- intelScreen->staticPool = driDRMStaticPoolInit(sPriv->fd);
- if (!intelScreen->staticPool)
- return GL_FALSE;
-
- batchPoolSize /= BATCH_SZ;
- intelScreen->batchPool = driBatchPoolInit(sPriv->fd,
- DRM_BO_FLAG_EXE |
- DRM_BO_FLAG_MEM_TT |
- DRM_BO_FLAG_MEM_LOCAL,
- BATCH_SZ,
- batchPoolSize, 5);
- if (!intelScreen->batchPool) {
- fprintf(stderr, "Failed to initialize batch pool - possible incorrect agpgart installed\n");
- return GL_FALSE;
- }
-#else
- intelScreen->staticPool = driDRMPoolInit(sPriv->fd);
- intelScreen->batchPool = driSlabPoolInit(sPriv->fd,
- DRM_BO_FLAG_EXE |
- DRM_BO_FLAG_MEM_TT,
- DRM_BO_FLAG_EXE |
- DRM_BO_FLAG_MEM_TT,
- intelScreen->max_batch_size,
- 1, 40, intelScreen->max_batch_size * 16, 0,
- intelScreen->fMan);
-#endif
intelScreen->havePools = GL_TRUE;
intelUpdateScreenRotation(sPriv, intelScreen->sarea);
return GL_TRUE;
}
+static const char *
+intel_get_name( struct pipe_winsys *winsys )
+{
+ return "Intel/DRI/ttm";
+}
+
+/*
+ * The state tracker (should!) keep track of whether the fake
+ * frontbuffer has been touched by any rendering since the last time
+ * we copied its contents to the real frontbuffer. Our task is easy:
+ */
+static void
+intel_flush_frontbuffer( struct pipe_winsys *winsys,
+ struct pipe_surface *surf,
+ void *context_private)
+{
+ struct intel_context *intel = (struct intel_context *) context_private;
+ __DRIdrawablePrivate *dPriv = intel->driDrawable;
+
+ intelDisplaySurface(dPriv, surf, NULL);
+}
static boolean
intelInitDriver(__DRIscreenPrivate * sPriv)
/* Allocate the private area */
intelScreen = CALLOC_STRUCT(intel_screen);
- if (!intelScreen)
+ if (!intelScreen)
return GL_FALSE;
/* parse information in __driConfigOptions */
(*glx_enable_extension) (psc, "GLX_SGI_make_current_read");
}
- intelScreen->max_batch_size = 16 * 4096;
-
-#if 1 // ZZZ JB
- intelScreen->mgr = driFenceMgrTTMInit(sPriv->fd);
- if (!intelScreen->mgr) {
- fprintf(stderr, "Failed to create fence manager.\n");
- return GL_FALSE;
- }
-
- intelScreen->fMan = driInitFreeSlabManager(10, 10);
- if (!intelScreen->fMan) {
- fprintf(stderr, "Failed to create free slab manager.\n");
- return GL_FALSE;
- }
-
- if (!intelCreatePools(sPriv))
- return GL_FALSE;
-#endif
-
- intelScreen->winsys = intel_create_pipe_winsys(sPriv->fd, intelScreen->fMan);
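+ /* intel_be_init_device() presumably takes over the buffer pool and
+ * fence manager setup that used to live here; only the DRI-specific
+ * winsys callbacks are hooked up on top of it.
+ */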
+ intel_be_init_device(&intelScreen->base, sPriv->fd);
+ intelScreen->base.base.flush_frontbuffer = intel_flush_frontbuffer;
+ intelScreen->base.base.get_name = intel_get_name;
return GL_TRUE;
}
{
struct intel_screen *intelScreen = intel_screen(sPriv);
+ intel_be_destroy_device(&intelScreen->base);
/* intelUnmapScreenRegions(intelScreen); */
- if (intelScreen->havePools) {
- driPoolTakeDown(intelScreen->staticPool);
- driPoolTakeDown(intelScreen->batchPool);
- }
FREE(intelScreen);
sPriv->private = NULL;
}
* This routine also fills in the linked list pointed to by \c driver_modes
* with the \c __GLcontextModes that the driver can support for windows or
* pbuffers.
- *
- * \return A pointer to a \c __DRIscreenPrivate on success, or \c NULL on
+ *
+ * \return A pointer to a \c __DRIscreenPrivate on success, or \c NULL on
* failure.
*/
PUBLIC void *
/**************************************************************************
- *
+ *
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
+ *
**************************************************************************/
#ifndef _INTEL_SCREEN_H_
#include "dri_util.h"
#include "i830_common.h"
#include "xmlconfig.h"
-#include "ws_dri_bufpool.h"
+#include "intel_drm/ws_dri_bufpool.h"
#include "pipe/p_compiler.h"
+#include "intel_drm/intel_be_device.h"
struct intel_screen
{
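+ /* Common intel_be DRM device; base.base is the pipe_winsys handed to
+ * the pipe drivers (see intelCreateContext()).
+ */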
+ struct intel_be_device base;
+
struct {
drm_handle_t handle;
int width;
int height;
int size;
- int cpp; /* for front and back buffers */
+ int cpp; /* for front and back buffers */
} front;
int deviceID;
*/
driOptionCache optionCache;
- struct _DriBufferPool *batchPool;
- struct _DriBufferPool *staticPool; /** for the X screen/framebuffer */
boolean havePools;
/**
*/
struct intel_context *dummyContext;
- /*
+ /*
* New stuff form the i915tex integration
*/
- struct _DriFenceMgr *mgr;
- struct _DriFreeSlabManager *fMan;
unsigned batch_id;
- unsigned max_batch_size;
+
struct pipe_winsys *winsys;
};
/**************************************************************************
- *
+ *
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
+ *
**************************************************************************/
#include "intel_screen.h"
#include "intel_context.h"
#include "intel_swapbuffers.h"
-#include "intel_batchbuffer.h"
+
#include "intel_reg.h"
-#include "intel_winsys.h"
#include "pipe/p_context.h"
#include "state_tracker/st_public.h"
#include "state_tracker/st_context.h"
#include "state_tracker/st_cb_fbo.h"
+#include "intel_drm/ws_dri_bufmgr.h"
+#include "intel_batchbuffer.h"
/**
* Display a colorbuffer surface in an X window.
if (pbox->x1 > pbox->x2 ||
pbox->y1 > pbox->y2 ||
- pbox->x2 > intelScreen->front.width ||
+ pbox->x2 > intelScreen->front.width ||
pbox->y2 > intelScreen->front.height) {
/* invalid cliprect, skip it */
continue;
assert(box.y1 < box.y2);
/* XXX this could be done with pipe->surface_copy() */
- BEGIN_BATCH(8, INTEL_BATCH_NO_CLIPRECTS);
+ /* XXX should have its own batch buffer */
+ if (!BEGIN_BATCH(8, 2)) {
+ /*
+ * This batch buffer is shared with a context, so we can't
+ * flush it here; flushing would risk a GPU lockup.
+ */
+ assert(0);
+ continue;
+ }
+
OUT_BATCH(CMD);
OUT_BATCH(BR13);
OUT_BATCH((box.y1 << 16) | box.x1);
OUT_BATCH((box.y2 << 16) | box.x2);
- OUT_RELOC(intelScreen->front.buffer,
+ OUT_RELOC(intelScreen->front.buffer,
DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE, 0);
OUT_BATCH((sbox.y1 << 16) | sbox.x1);
DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
DRM_BO_MASK_MEM | DRM_BO_FLAG_READ, 0);
- ADVANCE_BATCH();
}
if (intel->first_swap_fence)
driFenceUnReference(&intel->first_swap_fence);
- intel->first_swap_fence = intel_batchbuffer_flush(intel->batch);
+ intel->first_swap_fence = intel_be_batchbuffer_flush(intel->base.batch);
}
UNLOCK_HARDWARE(intel);
/**************************************************************************
- *
+ *
* Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
+ *
**************************************************************************/
#ifndef INTEL_SWAPBUFFERS_H
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#ifndef INTEL_WINSYS_H
-#define INTEL_WINSYS_H
-
-#include "pipe/p_state.h"
-
-struct intel_context;
-struct pipe_context;
-struct pipe_winsys;
-struct pipe_buffer;
-struct _DriBufferObject;
-
-struct pipe_winsys *
-intel_create_pipe_winsys( int fd, struct _DriFreeSlabManager *fMan );
-
-void
-intel_destroy_pipe_winsys( struct pipe_winsys *winsys );
-
-struct pipe_context *
-intel_create_softpipe( struct intel_context *intel,
- struct pipe_winsys *winsys );
-
-struct pipe_context *
-intel_create_i915simple( struct intel_context *intel,
- struct pipe_winsys *winsys );
-
-
-struct intel_buffer {
- struct pipe_buffer base;
- struct _DriBufferPool *pool;
- struct _DriBufferObject *driBO;
-};
-
-static INLINE struct intel_buffer *
-intel_buffer( struct pipe_buffer *buf )
-{
- return (struct intel_buffer *)buf;
-}
-
-static INLINE struct _DriBufferObject *
-dri_bo( struct pipe_buffer *buf )
-{
- return intel_buffer(buf)->driBO;
-}
-
-
-#endif
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Authors: Keith Whitwell <keithw-at-tungstengraphics-dot-com>
- */
-
-#include <stdlib.h>
-#include <xf86drm.h>
-#include "ws_dri_bufpool.h"
-#include "ws_dri_bufmgr.h"
-
-#include "intel_context.h"
-#include "intel_batchbuffer.h"
-#include "intel_winsys.h"
-
-#include "pipe/p_util.h"
-#include "pipe/p_winsys.h"
-#include "i915simple/i915_winsys.h"
-#include "i915simple/i915_screen.h"
-
-
-struct intel_i915_winsys {
- struct i915_winsys winsys; /**< batch buffer funcs */
- struct pipe_winsys *pws;
- struct intel_context *intel;
-};
-
-
-/* Turn a i915simple winsys into an intel/i915simple winsys:
- */
-static inline struct intel_i915_winsys *
-intel_i915_winsys( struct i915_winsys *sws )
-{
- return (struct intel_i915_winsys *)sws;
-}
-
-
-/* Simple batchbuffer interface:
- */
-
-static struct i915_batchbuffer*
-intel_i915_batch_get( struct i915_winsys *sws )
-{
- struct intel_context *intel = intel_i915_winsys(sws)->intel;
- return &intel->batch->base;
-}
-
-static void intel_i915_batch_reloc( struct i915_winsys *sws,
- struct pipe_buffer *buf,
- unsigned access_flags,
- unsigned delta )
-{
- struct intel_context *intel = intel_i915_winsys(sws)->intel;
- unsigned flags = DRM_BO_FLAG_MEM_TT;
- unsigned mask = DRM_BO_MASK_MEM;
-
- if (access_flags & I915_BUFFER_ACCESS_WRITE) {
- flags |= DRM_BO_FLAG_WRITE;
- mask |= DRM_BO_FLAG_WRITE;
- }
-
- if (access_flags & I915_BUFFER_ACCESS_READ) {
- flags |= DRM_BO_FLAG_READ;
- mask |= DRM_BO_FLAG_READ;
- }
-
- intel_offset_relocation( intel->batch,
- delta,
- dri_bo( buf ),
- flags,
- mask );
-}
-
-static void intel_i915_batch_flush( struct i915_winsys *sws,
- struct pipe_fence_handle **fence )
-{
- struct intel_i915_winsys *iws = intel_i915_winsys(sws);
- struct intel_context *intel = iws->intel;
- union {
- struct _DriFenceObject *dri;
- struct pipe_fence_handle *pipe;
- } fu;
-
- if (fence)
- assert(!*fence);
-
- fu.dri = intel_batchbuffer_flush( intel->batch );
-
- if (!fu.dri) {
- assert(0);
- *fence = NULL;
- return;
- }
-
- if (fu.dri) {
- if (fence)
- *fence = fu.pipe;
- else
- driFenceUnReference(&fu.dri);
- }
-
-}
-
-
-/**
- * Create i915 hardware rendering context.
- */
-struct pipe_context *
-intel_create_i915simple( struct intel_context *intel,
- struct pipe_winsys *winsys )
-{
- struct intel_i915_winsys *iws = CALLOC_STRUCT( intel_i915_winsys );
- struct pipe_screen *screen;
-
- /* Fill in this struct with callbacks that i915simple will need to
- * communicate with the window system, buffer manager, etc.
- */
- iws->winsys.batch_get = intel_i915_batch_get;
- iws->winsys.batch_reloc = intel_i915_batch_reloc;
- iws->winsys.batch_flush = intel_i915_batch_flush;
- iws->pws = winsys;
- iws->intel = intel;
-
- screen = i915_create_screen(winsys, intel->intelScreen->deviceID);
-
- /* Create the i915simple context:
- */
- return i915_create_context( screen,
- winsys,
- &iws->winsys );
-}
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Authors: Keith Whitwell <keithw-at-tungstengraphics-dot-com>
- */
-
-#include <stdlib.h>
-#include <xf86drm.h>
-
-#include "intel_context.h"
-#include "intel_winsys.h"
-#include "intel_swapbuffers.h"
-#include "intel_batchbuffer.h"
-
-#include "pipe/p_winsys.h"
-#include "pipe/p_defines.h"
-#include "pipe/p_state.h"
-#include "pipe/p_util.h"
-#include "pipe/p_inlines.h"
-
-struct intel_pipe_winsys {
- struct pipe_winsys winsys;
- struct _DriBufferPool *regionPool;
- struct _DriBufferPool *mallocPool;
- struct _DriBufferPool *vertexPool;
- struct _DriFreeSlabManager *fMan; /** shared between all pipes */
-};
-
-
-/* Turn a pipe winsys into an intel/pipe winsys:
- */
-static inline struct intel_pipe_winsys *
-intel_pipe_winsys( struct pipe_winsys *winsys )
-{
- return (struct intel_pipe_winsys *)winsys;
-}
-
-
-/*
- * Buffer functions.
- *
- * Most callbacks map directly onto dri_bufmgr operations:
- */
-
-static void *intel_buffer_map(struct pipe_winsys *winsys,
- struct pipe_buffer *buf,
- unsigned flags )
-{
- unsigned drm_flags = 0;
-
- if (flags & PIPE_BUFFER_USAGE_CPU_WRITE)
- drm_flags |= DRM_BO_FLAG_WRITE;
-
- if (flags & PIPE_BUFFER_USAGE_CPU_READ)
- drm_flags |= DRM_BO_FLAG_READ;
-
- return driBOMap( dri_bo(buf), drm_flags, 0 );
-}
-
-static void intel_buffer_unmap(struct pipe_winsys *winsys,
- struct pipe_buffer *buf)
-{
- driBOUnmap( dri_bo(buf) );
-}
-
-static void
-intel_buffer_destroy(struct pipe_winsys *winsys,
- struct pipe_buffer *buf)
-{
- driBOUnReference( dri_bo(buf) );
- FREE(buf);
-}
-
-static struct pipe_buffer *
-intel_buffer_create(struct pipe_winsys *winsys,
- unsigned alignment,
- unsigned usage,
- unsigned size )
-{
- struct intel_buffer *buffer = CALLOC_STRUCT( intel_buffer );
- struct intel_pipe_winsys *iws = intel_pipe_winsys(winsys);
- unsigned flags = 0;
- struct _DriBufferPool *pool;
-
- buffer->base.refcount = 1;
- buffer->base.alignment = alignment;
- buffer->base.usage = usage;
- buffer->base.size = size;
-
- if (usage & (PIPE_BUFFER_USAGE_VERTEX | PIPE_BUFFER_USAGE_CONSTANT)) {
- flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
- pool = iws->mallocPool;
- } else if (usage & PIPE_BUFFER_USAGE_CUSTOM) {
- /* For vertex buffers */
- flags |= DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_MEM_TT;
- pool = iws->vertexPool;
- } else {
- flags |= DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_MEM_TT;
- pool = iws->regionPool;
- }
-
- if (usage & PIPE_BUFFER_USAGE_GPU_READ)
- flags |= DRM_BO_FLAG_READ;
-
- if (usage & PIPE_BUFFER_USAGE_GPU_WRITE)
- flags |= DRM_BO_FLAG_WRITE;
-
- /* drm complains if we don't set any read/write flags.
- */
- if ((flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) == 0)
- flags |= DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE;
-
-#if 0
- if (flags & IWS_BUFFER_USAGE_EXE)
- flags |= DRM_BO_FLAG_EXE;
-
- if (usage & IWS_BUFFER_USAGE_CACHED)
- flags |= DRM_BO_FLAG_CACHED;
-#endif
-
- buffer->pool = pool;
- driGenBuffers( buffer->pool,
- "pipe buffer", 1, &buffer->driBO, alignment, flags, 0 );
-
- driBOData( buffer->driBO, size, NULL, buffer->pool, 0 );
-
- return &buffer->base;
-}
-
-
-static struct pipe_buffer *
-intel_user_buffer_create(struct pipe_winsys *winsys, void *ptr, unsigned bytes)
-{
- struct intel_buffer *buffer = CALLOC_STRUCT( intel_buffer );
- struct intel_pipe_winsys *iws = intel_pipe_winsys(winsys);
-
- driGenUserBuffer( iws->regionPool,
- "pipe user buffer", &buffer->driBO, ptr, bytes );
-
- buffer->base.refcount = 1;
-
- return &buffer->base;
-}
-
-
-/*
- * Surface functions.
- *
- * Deprecated!
- */
-
-static struct pipe_surface *
-intel_i915_surface_alloc(struct pipe_winsys *winsys)
-{
- assert("intel_i915_surface_alloc is deprecated" & 0);
- return NULL;
-}
-
-static int
-intel_i915_surface_alloc_storage(struct pipe_winsys *winsys,
- struct pipe_surface *surf,
- unsigned width, unsigned height,
- enum pipe_format format,
- unsigned flags,
- unsigned tex_usage)
-{
- assert("intel_i915_surface_alloc_storage is deprecated" & 0);
- return -1;
-}
-
-static void
-intel_i915_surface_release(struct pipe_winsys *winsys, struct pipe_surface **s)
-{
- assert("intel_i915_surface_release is deprecated" & 0);
-}
-
-/*
- * Fence functions
- */
-
-static void
-intel_fence_reference( struct pipe_winsys *sws,
- struct pipe_fence_handle **ptr,
- struct pipe_fence_handle *fence )
-{
- if (*ptr)
- driFenceUnReference((struct _DriFenceObject **)ptr);
-
- if (fence)
- *ptr = (struct pipe_fence_handle *)driFenceReference((struct _DriFenceObject *)fence);
-}
-
-static int
-intel_fence_signalled( struct pipe_winsys *sws,
- struct pipe_fence_handle *fence,
- unsigned flag )
-{
- return driFenceSignaled((struct _DriFenceObject *)fence, flag);
-}
-
-static int
-intel_fence_finish( struct pipe_winsys *sws,
- struct pipe_fence_handle *fence,
- unsigned flag )
-{
- return driFenceFinish((struct _DriFenceObject *)fence, flag, 0);
-}
-
-
-/*
- * Mixed functions
- */
-
-static const char *
-intel_get_name( struct pipe_winsys *winsys )
-{
- return "Intel/DRI/ttm";
-}
-
-/*
- * The state tracker (should!) keep track of whether the fake
- * frontbuffer has been touched by any rendering since the last time
- * we copied its contents to the real frontbuffer. Our task is easy:
- */
-static void
-intel_flush_frontbuffer( struct pipe_winsys *winsys,
- struct pipe_surface *surf,
- void *context_private)
-{
- struct intel_context *intel = (struct intel_context *) context_private;
- __DRIdrawablePrivate *dPriv = intel->driDrawable;
-
- intelDisplaySurface(dPriv, surf, NULL);
-}
-
-struct pipe_winsys *
-intel_create_pipe_winsys( int fd, struct _DriFreeSlabManager *fMan )
-{
- struct intel_pipe_winsys *iws = CALLOC_STRUCT( intel_pipe_winsys );
-
- /* Fill in this struct with callbacks that pipe will need to
- * communicate with the window system, buffer manager, etc.
- *
- * Pipe would be happy with a malloc based memory manager, but
- * the SwapBuffers implementation in this winsys driver requires
- * that rendering be done to an appropriate _DriBufferObject.
- */
- iws->winsys.buffer_create = intel_buffer_create;
- iws->winsys.user_buffer_create = intel_user_buffer_create;
- iws->winsys.buffer_map = intel_buffer_map;
- iws->winsys.buffer_unmap = intel_buffer_unmap;
- iws->winsys.buffer_destroy = intel_buffer_destroy;
- iws->winsys.flush_frontbuffer = intel_flush_frontbuffer;
- iws->winsys.get_name = intel_get_name;
- iws->winsys.surface_alloc = intel_i915_surface_alloc;
- iws->winsys.surface_alloc_storage = intel_i915_surface_alloc_storage;
- iws->winsys.surface_release = intel_i915_surface_release;
-
- iws->winsys.fence_reference = intel_fence_reference;
- iws->winsys.fence_signalled = intel_fence_signalled;
- iws->winsys.fence_finish = intel_fence_finish;
-
- if (fd) {
- iws->regionPool = driDRMPoolInit(fd);
- iws->vertexPool = driSlabPoolInit(fd,
- DRM_BO_FLAG_READ |
- DRM_BO_FLAG_WRITE |
- DRM_BO_FLAG_MEM_TT,
- DRM_BO_FLAG_READ |
- DRM_BO_FLAG_WRITE |
- DRM_BO_FLAG_MEM_TT,
- 128 * 4096,
- 1, 120, 128 * 4096 * 4, 0,
- fMan);
- }
-
- iws->mallocPool = driMallocPoolInit();
-
- return &iws->winsys;
-}
-
-void
-intel_destroy_pipe_winsys( struct pipe_winsys *winsys )
-{
- struct intel_pipe_winsys *iws = intel_pipe_winsys(winsys);
- if (iws->regionPool) {
- driPoolTakeDown(iws->regionPool);
- }
- if (iws->mallocPool) {
- driPoolTakeDown(iws->mallocPool);
- }
- free(iws);
-}
/**************************************************************************
- *
+ *
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
- *
- *
+ *
+ *
**************************************************************************/
/*
* Authors: Keith Whitwell <keithw-at-tungstengraphics-dot-com>
*/
#include "intel_context.h"
-#include "intel_winsys.h"
+#include "intel_winsys_softpipe.h"
#include "pipe/p_defines.h"
#include "pipe/p_util.h"
#include "pipe/p_format.h"
{
struct intel_softpipe_winsys *isws = CALLOC_STRUCT( intel_softpipe_winsys );
struct pipe_screen *screen = softpipe_create_screen(winsys);
-
+
/* Fill in this struct with callbacks that softpipe will need to
- * communicate with the window system, buffer manager, etc.
+ * communicate with the window system, buffer manager, etc.
*/
isws->sws.is_format_supported = intel_is_format_supported;
isws->intel = intel;
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef INTEL_SOFTPIPE_H
+#define INTEL_SOFTPIPE_H
+
+struct pipe_winsys;
+struct pipe_context;
+struct intel_context;
+
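+/*
+ * Create a softpipe-based pipe context for this winsys. Called from
+ * intelCreateContext() when the INTEL_SP environment variable is set, or
+ * as the fallback for unrecognized devices, substituting the software
+ * rasterizer for the i915simple hardware driver.
+ */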
+struct pipe_context *
+intel_create_softpipe( struct intel_context *intel,
+ struct pipe_winsys *winsys );
+
+#endif
/** Last context that used the buffer manager. */
int texAge;
int pf_enabled; /* is pageflipping allowed? */
- int pf_active;
+ int pf_active;
int pf_current_page; /* which buffer is being displayed? */
- int perf_boxes; /* performance boxes to be displayed */
+ int perf_boxes; /* performance boxes to be displayed */
int width, height; /* screen size in pixels */
drm_handle_t front_handle;
int num_cliprects; /* multipass with multiple cliprects? */
drm_clip_rect_t *cliprects; /* pointer to userspace cliprects */
} drmI830CmdBuffer;
-
+
typedef struct {
int *irq_seq;
} drmI830IrqEmit;
} drmI830GetParam;
#define I830_PARAM_IRQ_ACTIVE 1
-#define I830_PARAM_ALLOW_BATCHBUFFER 2
+#define I830_PARAM_ALLOW_BATCHBUFFER 2
typedef struct {
int param;
int bitsPerPixel;
int unused11[8]; /* was front/back/depth/rotated offset/pitch */
-
+
int unused12; /* logTextureGranularity */
int unused13; /* textureOffset */
+++ /dev/null
-#ifndef _INTEL_H_
-#define _INTEL_H_
-
-#include "xf86drm.h" /* drm_handle_t, etc */
-
-/* Intel */
-#ifndef PCI_CHIP_I810
-#define PCI_CHIP_I810 0x7121
-#define PCI_CHIP_I810_DC100 0x7123
-#define PCI_CHIP_I810_E 0x7125
-#define PCI_CHIP_I815 0x1132
-#define PCI_CHIP_I810_BRIDGE 0x7120
-#define PCI_CHIP_I810_DC100_BRIDGE 0x7122
-#define PCI_CHIP_I810_E_BRIDGE 0x7124
-#define PCI_CHIP_I815_BRIDGE 0x1130
-#endif
-
-#define PCI_CHIP_845_G 0x2562
-#define PCI_CHIP_I830_M 0x3577
-
-#ifndef PCI_CHIP_I855_GM
-#define PCI_CHIP_I855_GM 0x3582
-#define PCI_CHIP_I855_GM_BRIDGE 0x3580
-#endif
-
-#ifndef PCI_CHIP_I865_G
-#define PCI_CHIP_I865_G 0x2572
-#define PCI_CHIP_I865_G_BRIDGE 0x2570
-#endif
-
-#ifndef PCI_CHIP_I915_G
-#define PCI_CHIP_I915_G 0x2582
-#define PCI_CHIP_I915_G_BRIDGE 0x2580
-#endif
-
-#ifndef PCI_CHIP_I915_GM
-#define PCI_CHIP_I915_GM 0x2592
-#define PCI_CHIP_I915_GM_BRIDGE 0x2590
-#endif
-
-#ifndef PCI_CHIP_E7221_G
-#define PCI_CHIP_E7221_G 0x258A
-/* Same as I915_G_BRIDGE */
-#define PCI_CHIP_E7221_G_BRIDGE 0x2580
-#endif
-
-#ifndef PCI_CHIP_I945_G
-#define PCI_CHIP_I945_G 0x2772
-#define PCI_CHIP_I945_G_BRIDGE 0x2770
-#endif
-
-#ifndef PCI_CHIP_I945_GM
-#define PCI_CHIP_I945_GM 0x27A2
-#define PCI_CHIP_I945_GM_BRIDGE 0x27A0
-#endif
-
-#define IS_I810(pI810) (pI810->Chipset == PCI_CHIP_I810 || \
- pI810->Chipset == PCI_CHIP_I810_DC100 || \
- pI810->Chipset == PCI_CHIP_I810_E)
-#define IS_I815(pI810) (pI810->Chipset == PCI_CHIP_I815)
-#define IS_I830(pI810) (pI810->Chipset == PCI_CHIP_I830_M)
-#define IS_845G(pI810) (pI810->Chipset == PCI_CHIP_845_G)
-#define IS_I85X(pI810) (pI810->Chipset == PCI_CHIP_I855_GM)
-#define IS_I852(pI810) (pI810->Chipset == PCI_CHIP_I855_GM && (pI810->variant == I852_GM || pI810->variant == I852_GME))
-#define IS_I855(pI810) (pI810->Chipset == PCI_CHIP_I855_GM && (pI810->variant == I855_GM || pI810->variant == I855_GME))
-#define IS_I865G(pI810) (pI810->Chipset == PCI_CHIP_I865_G)
-
-#define IS_I915G(pI810) (pI810->Chipset == PCI_CHIP_I915_G || pI810->Chipset == PCI_CHIP_E7221_G)
-#define IS_I915GM(pI810) (pI810->Chipset == PCI_CHIP_I915_GM)
-#define IS_I945G(pI810) (pI810->Chipset == PCI_CHIP_I945_G)
-#define IS_I945GM(pI810) (pI810->Chipset == PCI_CHIP_I945_GM)
-#define IS_I9XX(pI810) (IS_I915G(pI810) || IS_I915GM(pI810) || IS_I945G(pI810) || IS_I945GM(pI810))
-
-#define IS_MOBILE(pI810) (IS_I830(pI810) || IS_I85X(pI810) || IS_I915GM(pI810) || IS_I945GM(pI810))
-
-#define I830_GMCH_CTRL 0x52
-
-#define I830_GMCH_MEM_MASK 0x1
-#define I830_GMCH_MEM_64M 0x1
-#define I830_GMCH_MEM_128M 0
-
-#define I830_GMCH_GMS_MASK 0x70
-#define I830_GMCH_GMS_DISABLED 0x00
-#define I830_GMCH_GMS_LOCAL 0x10
-#define I830_GMCH_GMS_STOLEN_512 0x20
-#define I830_GMCH_GMS_STOLEN_1024 0x30
-#define I830_GMCH_GMS_STOLEN_8192 0x40
-
-#define I855_GMCH_GMS_MASK (0x7 << 4)
-#define I855_GMCH_GMS_DISABLED 0x00
-#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
-#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
-#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
-#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
-#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
-#define I915G_GMCH_GMS_STOLEN_48M (0x6 << 4)
-#define I915G_GMCH_GMS_STOLEN_64M (0x7 << 4)
-
-typedef unsigned char Bool;
-#define TRUE 1
-#define FALSE 0
-
-#define PIPE_NONE 0<<0
-#define PIPE_CRT 1<<0
-#define PIPE_TV 1<<1
-#define PIPE_DFP 1<<2
-#define PIPE_LFP 1<<3
-#define PIPE_CRT2 1<<4
-#define PIPE_TV2 1<<5
-#define PIPE_DFP2 1<<6
-#define PIPE_LFP2 1<<7
-
-typedef struct _I830MemPool *I830MemPoolPtr;
-typedef struct _I830MemRange *I830MemRangePtr;
-typedef struct _I830MemRange {
- long Start;
- long End;
- long Size;
- unsigned long Physical;
- unsigned long Offset; /* Offset of AGP-allocated portion */
- unsigned long Alignment;
- drm_handle_t Key;
- unsigned long Pitch; // add pitch
- I830MemPoolPtr Pool;
-} I830MemRange;
-
-typedef struct _I830MemPool {
- I830MemRange Total;
- I830MemRange Free;
- I830MemRange Fixed;
- I830MemRange Allocated;
-} I830MemPool;
-
-typedef struct {
- int tail_mask;
- I830MemRange mem;
- unsigned char *virtual_start;
- int head;
- int tail;
- int space;
-} I830RingBuffer;
-
-typedef struct _I830Rec {
- unsigned char *MMIOBase;
- unsigned char *FbBase;
- int cpp;
- uint32_t aper_size;
- unsigned int bios_version;
-
- /* These are set in PreInit and never changed. */
- long FbMapSize;
- long TotalVideoRam;
- I830MemRange StolenMemory; /* pre-allocated memory */
- long BIOSMemorySize; /* min stolen pool size */
- int BIOSMemSizeLoc;
-
- /* These change according to what has been allocated. */
- long FreeMemory;
- I830MemRange MemoryAperture;
- I830MemPool StolenPool;
- long allocatedMemory;
-
- /* Regions allocated either from the above pools, or from agpgart. */
- /* for single and dual head configurations */
- I830MemRange FrontBuffer;
- I830MemRange FrontBuffer2;
- I830MemRange Scratch;
- I830MemRange Scratch2;
-
- I830RingBuffer *LpRing;
-
- I830MemRange BackBuffer;
- I830MemRange DepthBuffer;
- I830MemRange TexMem;
- int TexGranularity;
- I830MemRange ContextMem;
- int drmMinor;
- Bool have3DWindows;
-
- Bool NeedRingBufferLow;
- Bool allowPageFlip;
- Bool disableTiling;
-
- int Chipset;
- unsigned long LinearAddr;
- unsigned long MMIOAddr;
-
- drmSize registerSize; /**< \brief MMIO register map size */
- drm_handle_t registerHandle; /**< \brief MMIO register map handle */
- // IOADDRESS ioBase;
- int irq; /**< \brief IRQ number */
- int GttBound;
-
- drm_handle_t ring_map;
- unsigned int Fence[8];
-
-} I830Rec;
-
-/*
- * 12288 is set as the maximum, chosen because it is enough for
- * 1920x1440@32bpp with a 2048 pixel line pitch with some to spare.
- */
-#define I830_MAXIMUM_VBIOS_MEM 12288
-#define I830_DEFAULT_VIDEOMEM_2D (MB(32) / 1024)
-#define I830_DEFAULT_VIDEOMEM_3D (MB(64) / 1024)
-
-/* Flags for memory allocation function */
-#define FROM_ANYWHERE 0x00000000
-#define FROM_POOL_ONLY 0x00000001
-#define FROM_NEW_ONLY 0x00000002
-#define FROM_MASK 0x0000000f
-
-#define ALLOCATE_AT_TOP 0x00000010
-#define ALLOCATE_AT_BOTTOM 0x00000020
-#define FORCE_GAPS 0x00000040
-
-#define NEED_PHYSICAL_ADDR 0x00000100
-#define ALIGN_BOTH_ENDS 0x00000200
-#define FORCE_LOW 0x00000400
-
-#define ALLOC_NO_TILING 0x00001000
-#define ALLOC_INITIAL 0x00002000
-
-#define ALLOCATE_DRY_RUN 0x80000000
-
-/* Chipset registers for VIDEO BIOS memory RW access */
-#define _855_DRAM_RW_CONTROL 0x58
-#define _845_DRAM_RW_CONTROL 0x90
-#define DRAM_WRITE 0x33330000
-
-#define KB(x) ((x) * 1024)
-#define MB(x) ((x) * KB(1024))
-
-#define GTT_PAGE_SIZE KB(4)
-#define ROUND_TO(x, y) (((x) + (y) - 1) / (y) * (y))
-#define ROUND_DOWN_TO(x, y) ((x) / (y) * (y))
-#define ROUND_TO_PAGE(x) ROUND_TO((x), GTT_PAGE_SIZE)
-#define ROUND_TO_MB(x) ROUND_TO((x), MB(1))
-#define PRIMARY_RINGBUFFER_SIZE KB(128)
-
-
-/* Ring buffer registers, p277, overview p19
- */
-#define LP_RING 0x2030
-#define HP_RING 0x2040
-
-#define RING_TAIL 0x00
-#define TAIL_ADDR 0x000FFFF8
-#define I830_TAIL_MASK 0x001FFFF8
-
-#define RING_HEAD 0x04
-#define HEAD_WRAP_COUNT 0xFFE00000
-#define HEAD_WRAP_ONE 0x00200000
-#define HEAD_ADDR 0x001FFFFC
-#define I830_HEAD_MASK 0x001FFFFC
-
-#define RING_START 0x08
-#define START_ADDR 0x03FFFFF8
-#define I830_RING_START_MASK 0xFFFFF000
-
-#define RING_LEN 0x0C
-#define RING_NR_PAGES 0x001FF000
-#define I830_RING_NR_PAGES 0x001FF000
-#define RING_REPORT_MASK 0x00000006
-#define RING_REPORT_64K 0x00000002
-#define RING_REPORT_128K 0x00000004
-#define RING_NO_REPORT 0x00000000
-#define RING_VALID_MASK 0x00000001
-#define RING_VALID 0x00000001
-#define RING_INVALID 0x00000000
-
-
-/* Fence/Tiling ranges [0..7]
- */
-#define FENCE 0x2000
-#define FENCE_NR 8
-
-#define I915G_FENCE_START_MASK 0x0ff00000
-
-#define I830_FENCE_START_MASK 0x07f80000
-
-#define FENCE_START_MASK 0x03F80000
-#define FENCE_X_MAJOR 0x00000000
-#define FENCE_Y_MAJOR 0x00001000
-#define FENCE_SIZE_MASK 0x00000700
-#define FENCE_SIZE_512K 0x00000000
-#define FENCE_SIZE_1M 0x00000100
-#define FENCE_SIZE_2M 0x00000200
-#define FENCE_SIZE_4M 0x00000300
-#define FENCE_SIZE_8M 0x00000400
-#define FENCE_SIZE_16M 0x00000500
-#define FENCE_SIZE_32M 0x00000600
-#define FENCE_SIZE_64M 0x00000700
-#define I915G_FENCE_SIZE_1M 0x00000000
-#define I915G_FENCE_SIZE_2M 0x00000100
-#define I915G_FENCE_SIZE_4M 0x00000200
-#define I915G_FENCE_SIZE_8M 0x00000300
-#define I915G_FENCE_SIZE_16M 0x00000400
-#define I915G_FENCE_SIZE_32M 0x00000500
-#define I915G_FENCE_SIZE_64M 0x00000600
-#define I915G_FENCE_SIZE_128M 0x00000700
-#define FENCE_PITCH_1 0x00000000
-#define FENCE_PITCH_2 0x00000010
-#define FENCE_PITCH_4 0x00000020
-#define FENCE_PITCH_8 0x00000030
-#define FENCE_PITCH_16 0x00000040
-#define FENCE_PITCH_32 0x00000050
-#define FENCE_PITCH_64 0x00000060
-#define FENCE_VALID 0x00000001
-
-#include <mmio.h>
-
-# define MMIO_IN8(base, offset) \
- *(volatile unsigned char *)(((unsigned char*)(base)) + (offset))
-# define MMIO_IN32(base, offset) \
- read_MMIO_LE32(base, offset)
-# define MMIO_OUT8(base, offset, val) \
- *(volatile unsigned char *)(((unsigned char*)(base)) + (offset)) = (val)
-# define MMIO_OUT32(base, offset, val) \
- *(volatile unsigned int *)(void *)(((unsigned char*)(base)) + (offset)) = CPU_TO_LE32(val)
-
-
- /* Memory mapped register access macros */
-#define INREG8(addr) MMIO_IN8(MMIO, addr)
-#define INREG(addr) MMIO_IN32(MMIO, addr)
-#define OUTREG8(addr, val) MMIO_OUT8(MMIO, addr, val)
-#define OUTREG(addr, val) MMIO_OUT32(MMIO, addr, val)
-
-#define DSPABASE 0x70184
-
-#endif
+++ /dev/null
-/**
- * \file server/intel_dri.c
- * \brief File to perform the device-specific initialization tasks typically
- * done in the X server.
- *
- * Here they are converted to run in the client (or perhaps a standalone
- * process), and to work with the frame buffer device rather than the X
- * server infrastructure.
- *
- * Copyright (C) 2006 Dave Airlie (airlied@linux.ie)
-
- Permission is hereby granted, free of charge, to any person obtaining a
- copy of this software and associated documentation files (the
- "Software"), to deal in the Software without restriction, including
- without limitation the rights to use, copy, modify, merge, publish,
- distribute, sub license, and/or sell copies of the Software, and to
- permit persons to whom the Software is furnished to do so, subject to
- the following conditions:
-
- The above copyright notice and this permission notice (including the
- next paragraph) shall be included in all copies or substantial portions
- of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- IN NO EVENT SHALL THE COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR
- ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <errno.h>
-#include <unistd.h>
-
-#include "driver.h"
-#include "drm.h"
-
-#include "intel.h"
-#include "i830_dri.h"
-
-#include "memops.h"
-#include "pciaccess.h"
-
-static size_t drm_page_size;
-static int nextTile = 0;
-#define xf86DrvMsg(...) do {} while(0)
-
-static const int pitches[] = {
- 128 * 8,
- 128 * 16,
- 128 * 32,
- 128 * 64,
- 0
-};
-
-static Bool I830DRIDoMappings(DRIDriverContext *ctx, I830Rec *pI830, drmI830Sarea *sarea);
-
-static unsigned long
-GetBestTileAlignment(unsigned long size)
-{
- unsigned long i;
-
- for (i = KB(512); i < size; i <<= 1)
- ;
-
- if (i > MB(64))
- i = MB(64);
-
- return i;
-}
-
-static void SetFenceRegs(const DRIDriverContext *ctx, I830Rec *pI830)
-{
- int i;
- unsigned char *MMIO = ctx->MMIOAddress;
-
- for (i = 0; i < 8; i++) {
- OUTREG(FENCE + i * 4, pI830->Fence[i]);
- // if (I810_DEBUG & DEBUG_VERBOSE_VGA)
- fprintf(stderr,"Fence Register : %x\n", pI830->Fence[i]);
- }
-}
-
-/* Tiled memory is good... really, really good...
- *
- * Need to make it less likely that we miss out on this - probably
- * need to move the frontbuffer away from the 'guaranteed' alignment
- * of the first memory segment, or perhaps allocate a discontiguous
- * framebuffer to get more alignment 'sweet spots'.
- */
-static void
-SetFence(const DRIDriverContext *ctx, I830Rec *pI830,
- int nr, unsigned int start, unsigned int pitch,
- unsigned int size)
-{
- unsigned int val;
- unsigned int fence_mask = 0;
- unsigned int fence_pitch;
-
- if (nr < 0 || nr > 7) {
- fprintf(stderr,
- "SetFence: fence %d out of range\n",nr);
- return;
- }
-
- pI830->Fence[nr] = 0;
-
- if (IS_I9XX(pI830))
- fence_mask = ~I915G_FENCE_START_MASK;
- else
- fence_mask = ~I830_FENCE_START_MASK;
-
- if (start & fence_mask) {
- fprintf(stderr,
- "SetFence: %d: start (0x%08x) is not %s aligned\n",
- nr, start, (IS_I9XX(pI830)) ? "1MB" : "512k");
- return;
- }
-
- if (start % size) {
- fprintf(stderr,
- "SetFence: %d: start (0x%08x) is not size (%dk) aligned\n",
- nr, start, size / 1024);
- return;
- }
-
- if (pitch & 127) {
- fprintf(stderr,
- "SetFence: %d: pitch (%d) not a multiple of 128 bytes\n",
- nr, pitch);
- return;
- }
-
- val = (start | FENCE_X_MAJOR | FENCE_VALID);
-
- if (IS_I9XX(pI830)) {
- switch (size) {
- case MB(1):
- val |= I915G_FENCE_SIZE_1M;
- break;
- case MB(2):
- val |= I915G_FENCE_SIZE_2M;
- break;
- case MB(4):
- val |= I915G_FENCE_SIZE_4M;
- break;
- case MB(8):
- val |= I915G_FENCE_SIZE_8M;
- break;
- case MB(16):
- val |= I915G_FENCE_SIZE_16M;
- break;
- case MB(32):
- val |= I915G_FENCE_SIZE_32M;
- break;
- case MB(64):
- val |= I915G_FENCE_SIZE_64M;
- break;
- default:
- fprintf(stderr,
- "SetFence: %d: illegal size (%d kByte)\n", nr, size / 1024);
- return;
- }
- } else {
- switch (size) {
- case KB(512):
- val |= FENCE_SIZE_512K;
- break;
- case MB(1):
- val |= FENCE_SIZE_1M;
- break;
- case MB(2):
- val |= FENCE_SIZE_2M;
- break;
- case MB(4):
- val |= FENCE_SIZE_4M;
- break;
- case MB(8):
- val |= FENCE_SIZE_8M;
- break;
- case MB(16):
- val |= FENCE_SIZE_16M;
- break;
- case MB(32):
- val |= FENCE_SIZE_32M;
- break;
- case MB(64):
- val |= FENCE_SIZE_64M;
- break;
- default:
- fprintf(stderr,
- "SetFence: %d: illegal size (%d kByte)\n", nr, size / 1024);
- return;
- }
- }
-
- if (IS_I9XX(pI830))
- fence_pitch = pitch / 512;
- else
- fence_pitch = pitch / 128;
-
- switch (fence_pitch) {
- case 1:
- val |= FENCE_PITCH_1;
- break;
- case 2:
- val |= FENCE_PITCH_2;
- break;
- case 4:
- val |= FENCE_PITCH_4;
- break;
- case 8:
- val |= FENCE_PITCH_8;
- break;
- case 16:
- val |= FENCE_PITCH_16;
- break;
- case 32:
- val |= FENCE_PITCH_32;
- break;
- case 64:
- val |= FENCE_PITCH_64;
- break;
- default:
- fprintf(stderr,
- "SetFence: %d: illegal pitch (%d)\n", nr, pitch);
- return;
- }
-
- pI830->Fence[nr] = val;
-}
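
For a concrete trace of the encoding above (the numbers are illustrative and not taken from this patch): on an i915-class chip, a 2 MB X-major surface at offset 0x00200000 with a 2048-byte pitch passes all three sanity checks and packs into a single register value. A minimal standalone sketch, repeating only the register fields it needs:

    /* Illustration only: fields copied from the intel.h definitions above. */
    #include <stdio.h>

    #define FENCE_X_MAJOR        0x00000000
    #define FENCE_VALID          0x00000001
    #define I915G_FENCE_SIZE_2M  0x00000100
    #define FENCE_PITCH_4        0x00000020   /* 2048-byte pitch / 512 == 4 */

    int main(void)
    {
        unsigned int start = 0x00200000;   /* 1 MB aligned and a multiple of 2 MB */
        unsigned int val = start | FENCE_X_MAJOR | I915G_FENCE_SIZE_2M
                         | FENCE_PITCH_4 | FENCE_VALID;

        printf("Fence Register : %x\n", val);   /* prints 200121 */
        return 0;
    }
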
-
-static Bool
-MakeTiles(const DRIDriverContext *ctx, I830Rec *pI830, I830MemRange *pMem)
-{
- int pitch, ntiles, i;
-
- pitch = pMem->Pitch * ctx->cpp;
- /*
- * Simply try to break the region up into at most four pieces of size
- * equal to the alignment.
- */
- ntiles = ROUND_TO(pMem->Size, pMem->Alignment) / pMem->Alignment;
- if (ntiles >= 4) {
- return FALSE;
- }
-
- for (i = 0; i < ntiles; i++, nextTile++) {
- SetFence(ctx, pI830, nextTile, pMem->Start + i * pMem->Alignment,
- pitch, pMem->Alignment);
- }
- return TRUE;
-}
-
-static void I830SetupMemoryTiling(const DRIDriverContext *ctx, I830Rec *pI830)
-{
- int i;
-
- /* Clear out */
- for (i = 0; i < 8; i++)
- pI830->Fence[i] = 0;
-
- nextTile = 0;
-
- if (pI830->BackBuffer.Alignment >= KB(512)) {
- if (MakeTiles(ctx, pI830, &(pI830->BackBuffer))) {
- fprintf(stderr,
- "Activating tiled memory for the back buffer.\n");
- } else {
- fprintf(stderr,
- "MakeTiles failed for the back buffer.\n");
- pI830->allowPageFlip = FALSE;
- }
- }
-
- if (pI830->DepthBuffer.Alignment >= KB(512)) {
- if (MakeTiles(ctx, pI830, &(pI830->DepthBuffer))) {
- fprintf(stderr,
- "Activating tiled memory for the depth buffer.\n");
- } else {
- fprintf(stderr,
- "MakeTiles failed for the depth buffer.\n");
- }
- }
-
- return;
-}
-
-static int I830DetectMemory(const DRIDriverContext *ctx, I830Rec *pI830)
-{
- struct pci_device host_bridge, ig_dev;
- uint32_t gmch_ctrl;
- int memsize = 0;
- int range;
- uint32_t aper_size;
- uint32_t membase2 = 0;
-
- memset(&host_bridge, 0, sizeof(host_bridge));
- memset(&ig_dev, 0, sizeof(ig_dev));
-
- ig_dev.dev = 2;
-
- pci_device_cfg_read_u32(&host_bridge, &gmch_ctrl, I830_GMCH_CTRL);
-
- if (IS_I830(pI830) || IS_845G(pI830)) {
- if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
- aper_size = 0x80000000;
- } else {
- aper_size = 0x40000000;
- }
- } else {
- if (IS_I9XX(pI830)) {
- int ret;
- ret = pci_device_cfg_read_u32(&ig_dev, &membase2, 0x18);
- if (membase2 & 0x08000000)
- aper_size = 0x8000000;
- else
- aper_size = 0x10000000;
-
- fprintf(stderr,"aper size is %08X %08x %d\n", aper_size, membase2, ret);
- } else
- aper_size = 0x8000000;
- }
-
- pI830->aper_size = aper_size;
-
-
- /* We need to reduce the stolen size by the GTT and the popup.
- * The GTT size varies according to the FbMapSize, and the popup is 4KB. */
- range = (ctx->shared.fbSize / (1024*1024)) + 4;
-
- if (IS_I85X(pI830) || IS_I865G(pI830) || IS_I9XX(pI830)) {
- switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
- case I855_GMCH_GMS_STOLEN_1M:
- memsize = MB(1) - KB(range);
- break;
- case I855_GMCH_GMS_STOLEN_4M:
- memsize = MB(4) - KB(range);
- break;
- case I855_GMCH_GMS_STOLEN_8M:
- memsize = MB(8) - KB(range);
- break;
- case I855_GMCH_GMS_STOLEN_16M:
- memsize = MB(16) - KB(range);
- break;
- case I855_GMCH_GMS_STOLEN_32M:
- memsize = MB(32) - KB(range);
- break;
- case I915G_GMCH_GMS_STOLEN_48M:
- if (IS_I9XX(pI830))
- memsize = MB(48) - KB(range);
- break;
- case I915G_GMCH_GMS_STOLEN_64M:
- if (IS_I9XX(pI830))
- memsize = MB(64) - KB(range);
- break;
- }
- } else {
- switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
- case I830_GMCH_GMS_STOLEN_512:
- memsize = KB(512) - KB(range);
- break;
- case I830_GMCH_GMS_STOLEN_1024:
- memsize = MB(1) - KB(range);
- break;
- case I830_GMCH_GMS_STOLEN_8192:
- memsize = MB(8) - KB(range);
- break;
- case I830_GMCH_GMS_LOCAL:
- memsize = 0;
- xf86DrvMsg(pScrn->scrnIndex, X_WARNING,
- "Local memory found, but won't be used.\n");
- break;
- }
- }
- if (memsize > 0) {
- fprintf(stderr,
- "detected %d kB stolen memory.\n", memsize / 1024);
- } else {
- fprintf(stderr,
- "no video memory detected.\n");
- }
- return memsize;
-}
-
-static int AgpInit(const DRIDriverContext *ctx, I830Rec *info)
-{
- unsigned long mode = 0x4;
-
- if (drmAgpAcquire(ctx->drmFD) < 0) {
- fprintf(stderr, "[gart] AGP not available\n");
- return 0;
- }
-
- if (drmAgpEnable(ctx->drmFD, mode) < 0) {
- fprintf(stderr, "[gart] AGP not enabled\n");
- drmAgpRelease(ctx->drmFD);
- return 0;
- }
- else
- fprintf(stderr, "[gart] AGP enabled at %dx\n", ctx->agpmode);
-
- return 1;
-}
-
-/*
- * Allocate memory from the given pool. Grow the pool if needed and if
- * possible.
- */
-static unsigned long
-AllocFromPool(const DRIDriverContext *ctx, I830Rec *pI830,
- I830MemRange *result, I830MemPool *pool,
- long size, unsigned long alignment, int flags)
-{
- long needed, start, end;
-
- if (!result || !pool || !size)
- return 0;
-
- /* Calculate how much space is needed. */
- if (alignment <= GTT_PAGE_SIZE)
- needed = size;
- else {
- start = ROUND_TO(pool->Free.Start, alignment);
- end = ROUND_TO(start + size, alignment);
- needed = end - pool->Free.Start;
- }
- if (needed > pool->Free.Size) {
- return 0;
- }
-
- result->Start = ROUND_TO(pool->Free.Start, alignment);
- pool->Free.Start += needed;
- result->End = pool->Free.Start;
-
- pool->Free.Size = pool->Free.End - pool->Free.Start;
- result->Size = result->End - result->Start;
- result->Pool = pool;
- result->Alignment = alignment;
- return needed;
-}
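
To make the alignment bookkeeping above concrete (the pool values are invented for illustration): with Free.Start at 0x128000, a 1 MB request and 512 KB alignment, the allocation starts at the rounded-up offset 0x180000, and "needed" also charges the 0x58000 bytes of padding in front of it to the pool. A standalone sketch of just that arithmetic:

    /* Illustration only: repeats the rounding arithmetic of AllocFromPool(). */
    #include <stdio.h>

    #define ROUND_TO(x, y) (((x) + (y) - 1) / (y) * (y))

    int main(void)
    {
        long free_start = 0x128000;   /* hypothetical pool state */
        long size       = 0x100000;   /* 1 MB request            */
        long alignment  = 0x80000;    /* 512 KB                  */

        long start  = ROUND_TO(free_start, alignment);   /* 0x180000 */
        long end    = ROUND_TO(start + size, alignment); /* 0x280000 */
        long needed = end - free_start;                  /* 0x158000 */

        printf("start 0x%lx end 0x%lx needed 0x%lx\n", start, end, needed);
        return 0;
    }
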
-
-static unsigned long AllocFromAGP(const DRIDriverContext *ctx, I830Rec *pI830, long size, unsigned long alignment, I830MemRange *result)
-{
- unsigned long start, end;
- unsigned long newApStart, newApEnd;
- int ret;
- if (!result || !size)
- return 0;
-
- if (!alignment)
- alignment = 4;
-
- start = ROUND_TO(pI830->MemoryAperture.Start, alignment);
- end = ROUND_TO(start + size, alignment);
- newApStart = end;
- newApEnd = pI830->MemoryAperture.End;
-
- ret=drmAgpAlloc(ctx->drmFD, size, 0, &(result->Physical), (drm_handle_t *)&(result->Key));
-
- if (ret)
- {
- fprintf(stderr,"drmAgpAlloc failed %d\n", ret);
- return 0;
- }
- pI830->allocatedMemory += size;
- pI830->MemoryAperture.Start = newApStart;
- pI830->MemoryAperture.End = newApEnd;
- pI830->MemoryAperture.Size = newApEnd - newApStart;
- // pI830->FreeMemory -= size;
- result->Start = start;
- result->End = start + size;
- result->Size = size;
- result->Offset = start;
- result->Alignment = alignment;
- result->Pool = NULL;
-
- return size;
-}
-
-unsigned long
-I830AllocVidMem(const DRIDriverContext *ctx, I830Rec *pI830,
- I830MemRange *result, I830MemPool *pool, long size,
- unsigned long alignment, int flags)
-{
- unsigned long ret;
-
- if (!result)
- return 0;
-
- /* Make sure these are initialised. */
- result->Size = 0;
- result->Key = -1;
-
- if (!size) {
- return 0;
- }
-
- if (pool->Free.Size < size) {
- ret = AllocFromAGP(ctx, pI830, size, alignment, result);
- }
- else {
- ret = AllocFromPool(ctx, pI830, result, pool, size, alignment, flags);
- if (ret == 0)
- ret = AllocFromAGP(ctx, pI830, size, alignment, result);
- }
- return ret;
-}
-
-static Bool BindAgpRange(const DRIDriverContext *ctx, I830MemRange *mem)
-{
- if (!mem)
- return FALSE;
-
- if (mem->Key == -1)
- return TRUE;
-
- return !drmAgpBind(ctx->drmFD, mem->Key, mem->Offset);
-}
-
-/* simple memory allocation routines needed */
-/* put ring buffer in low memory */
-/* need to allocate front, back, depth buffers aligned correctly,
- allocate ring buffer,
-*/
-
-/* */
-static Bool
-I830AllocateMemory(const DRIDriverContext *ctx, I830Rec *pI830)
-{
- unsigned long size, ret;
- unsigned long lines, lineSize, align;
-
- /* allocate ring buffer */
- memset(pI830->LpRing, 0, sizeof(I830RingBuffer));
- pI830->LpRing->mem.Key = -1;
-
- size = PRIMARY_RINGBUFFER_SIZE;
-
- ret = I830AllocVidMem(ctx, pI830, &pI830->LpRing->mem, &pI830->StolenPool, size, 0x1000, 0);
-
- if (ret != size)
- {
- fprintf(stderr,"unable to allocate ring buffer %ld\n", ret);
- return FALSE;
- }
-
- pI830->LpRing->tail_mask = pI830->LpRing->mem.Size - 1;
-
-
- /* allocate front buffer */
- memset(&(pI830->FrontBuffer), 0, sizeof(pI830->FrontBuffer));
- pI830->FrontBuffer.Key = -1;
- pI830->FrontBuffer.Pitch = ctx->shared.virtualWidth;
-
- align = KB(512);
-
- lineSize = ctx->shared.virtualWidth * ctx->cpp;
- lines = (ctx->shared.virtualHeight + 15) / 16 * 16;
- size = lineSize * lines;
- size = ROUND_TO_PAGE(size);
-
- align = GetBestTileAlignment(size);
-
- ret = I830AllocVidMem(ctx, pI830, &pI830->FrontBuffer, &pI830->StolenPool, size, align, 0);
- if (ret < size)
- {
- fprintf(stderr,"unable to allocate front buffer %ld\n", ret);
- return FALSE;
- }
-
- memset(&(pI830->BackBuffer), 0, sizeof(pI830->BackBuffer));
- pI830->BackBuffer.Key = -1;
- pI830->BackBuffer.Pitch = ctx->shared.virtualWidth;
-
- ret = I830AllocVidMem(ctx, pI830, &pI830->BackBuffer, &pI830->StolenPool, size, align, 0);
- if (ret < size)
- {
- fprintf(stderr,"unable to allocate back buffer %ld\n", ret);
- return FALSE;
- }
-
- memset(&(pI830->DepthBuffer), 0, sizeof(pI830->DepthBuffer));
- pI830->DepthBuffer.Key = -1;
- pI830->DepthBuffer.Pitch = ctx->shared.virtualWidth;
-
- ret = I830AllocVidMem(ctx, pI830, &pI830->DepthBuffer, &pI830->StolenPool, size, align, 0);
- if (ret < size)
- {
- fprintf(stderr,"unable to allocate depth buffer %ld\n", ret);
- return FALSE;
- }
-
- memset(&(pI830->ContextMem), 0, sizeof(pI830->ContextMem));
- pI830->ContextMem.Key = -1;
- size = KB(32);
-
- ret = I830AllocVidMem(ctx, pI830, &pI830->ContextMem, &pI830->StolenPool, size, align, 0);
- if (ret < size)
- {
- fprintf(stderr,"unable to allocate context buffer %ld\n", ret);
- return FALSE;
- }
-
-#if 0
- memset(&(pI830->TexMem), 0, sizeof(pI830->TexMem));
- pI830->TexMem.Key = -1;
-
- size = 32768 * 1024;
- ret = AllocFromAGP(ctx, pI830, size, align, &pI830->TexMem);
- if (ret < size)
- {
- fprintf(stderr,"unable to allocate texture memory %ld\n", ret);
- return FALSE;
- }
-#endif
-
- return TRUE;
-}
-
-static Bool
-I830BindMemory(const DRIDriverContext *ctx, I830Rec *pI830)
-{
- if (!BindAgpRange(ctx, &pI830->LpRing->mem))
- return FALSE;
- if (!BindAgpRange(ctx, &pI830->FrontBuffer))
- return FALSE;
- if (!BindAgpRange(ctx, &pI830->BackBuffer))
- return FALSE;
- if (!BindAgpRange(ctx, &pI830->DepthBuffer))
- return FALSE;
- if (!BindAgpRange(ctx, &pI830->ContextMem))
- return FALSE;
-#if 0
- if (!BindAgpRange(ctx, &pI830->TexMem))
- return FALSE;
-#endif
- return TRUE;
-}
-
-static void SetupDRIMM(const DRIDriverContext *ctx, I830Rec *pI830)
-{
- unsigned long aperEnd = ROUND_DOWN_TO(pI830->aper_size, GTT_PAGE_SIZE) / GTT_PAGE_SIZE;
- unsigned long aperStart = ROUND_TO(pI830->aper_size - KB(32768), GTT_PAGE_SIZE) / GTT_PAGE_SIZE;
-
- fprintf(stderr, "aper size is %08X\n", ctx->shared.fbSize);
- if (drmMMInit(ctx->drmFD, aperStart, aperEnd - aperStart, DRM_BO_MEM_TT)) {
- fprintf(stderr,
- "DRM MM Initialization Failed\n");
- } else {
- fprintf(stderr,
- "DRM MM Initialized at offset 0x%lx length %d page\n", aperStart, aperEnd-aperStart);
- }
-
-}
-
-static Bool
-I830CleanupDma(const DRIDriverContext *ctx)
-{
- drmI830Init info;
-
- memset(&info, 0, sizeof(drmI830Init));
- info.func = I830_CLEANUP_DMA;
-
- if (drmCommandWrite(ctx->drmFD, DRM_I830_INIT,
- &info, sizeof(drmI830Init))) {
- fprintf(stderr, "I830 Dma Cleanup Failed\n");
- return FALSE;
- }
-
- return TRUE;
-}
-
-static Bool
-I830InitDma(const DRIDriverContext *ctx, I830Rec *pI830)
-{
- I830RingBuffer *ring = pI830->LpRing;
- drmI830Init info;
-
- memset(&info, 0, sizeof(drmI830Init));
- info.func = I830_INIT_DMA;
-
- info.ring_start = ring->mem.Start + pI830->LinearAddr;
- info.ring_end = ring->mem.End + pI830->LinearAddr;
- info.ring_size = ring->mem.Size;
-
- info.mmio_offset = (unsigned int)ctx->MMIOStart;
-
- info.sarea_priv_offset = sizeof(drm_sarea_t);
-
- info.front_offset = pI830->FrontBuffer.Start;
- info.back_offset = pI830->BackBuffer.Start;
- info.depth_offset = pI830->DepthBuffer.Start;
- info.w = ctx->shared.virtualWidth;
- info.h = ctx->shared.virtualHeight;
- info.pitch = ctx->shared.virtualWidth;
- info.back_pitch = pI830->BackBuffer.Pitch;
- info.depth_pitch = pI830->DepthBuffer.Pitch;
- info.cpp = ctx->cpp;
-
- if (drmCommandWrite(ctx->drmFD, DRM_I830_INIT,
- &info, sizeof(drmI830Init))) {
- fprintf(stderr,
- "I830 Dma Initialization Failed\n");
- return FALSE;
- }
-
- return TRUE;
-}
-
-static int I830CheckDRMVersion( const DRIDriverContext *ctx,
- I830Rec *pI830 )
-{
- drmVersionPtr version;
-
- version = drmGetVersion(ctx->drmFD);
-
- if (version) {
- int req_minor, req_patch;
-
- req_minor = 4;
- req_patch = 0;
-
- if (version->version_major != 1 ||
- version->version_minor < req_minor ||
- (version->version_minor == req_minor &&
- version->version_patchlevel < req_patch)) {
- /* Incompatible drm version */
- fprintf(stderr,
- "[dri] I830DRIScreenInit failed because of a version "
- "mismatch.\n"
- "[dri] i915.o kernel module version is %d.%d.%d "
- "but version 1.%d.%d or newer is needed.\n"
- "[dri] Disabling DRI.\n",
- version->version_major,
- version->version_minor,
- version->version_patchlevel,
- req_minor,
- req_patch);
- drmFreeVersion(version);
- return 0;
- }
-
- pI830->drmMinor = version->version_minor;
- drmFreeVersion(version);
- }
- return 1;
-}
-
-static void
-I830SetRingRegs(const DRIDriverContext *ctx, I830Rec *pI830)
-{
- unsigned int itemp;
- unsigned char *MMIO = ctx->MMIOAddress;
-
- OUTREG(LP_RING + RING_LEN, 0);
- OUTREG(LP_RING + RING_TAIL, 0);
- OUTREG(LP_RING + RING_HEAD, 0);
-
- if ((long)(pI830->LpRing->mem.Start & I830_RING_START_MASK) !=
- pI830->LpRing->mem.Start) {
- fprintf(stderr,
- "I830SetRingRegs: Ring buffer start (%lx) violates its "
- "mask (%x)\n", pI830->LpRing->mem.Start, I830_RING_START_MASK);
- }
- /* Don't care about the old value. Reserved bits must be zero anyway. */
- itemp = pI830->LpRing->mem.Start & I830_RING_START_MASK;
- OUTREG(LP_RING + RING_START, itemp);
-
- if (((pI830->LpRing->mem.Size - 4096) & I830_RING_NR_PAGES) !=
- pI830->LpRing->mem.Size - 4096) {
- fprintf(stderr,
- "I830SetRingRegs: Ring buffer size - 4096 (%lx) violates its "
- "mask (%x)\n", pI830->LpRing->mem.Size - 4096,
- I830_RING_NR_PAGES);
- }
- /* Don't care about the old value. Reserved bits must be zero anyway. */
- itemp = (pI830->LpRing->mem.Size - 4096) & I830_RING_NR_PAGES;
- itemp |= (RING_NO_REPORT | RING_VALID);
- OUTREG(LP_RING + RING_LEN, itemp);
-
- pI830->LpRing->head = INREG(LP_RING + RING_HEAD) & I830_HEAD_MASK;
- pI830->LpRing->tail = INREG(LP_RING + RING_TAIL);
- pI830->LpRing->space = pI830->LpRing->head - (pI830->LpRing->tail + 8);
- if (pI830->LpRing->space < 0)
- pI830->LpRing->space += pI830->LpRing->mem.Size;
-
- SetFenceRegs(ctx, pI830);
-
- /* RESET THE DISPLAY PIPE TO POINT TO THE FRONTBUFFER - hacky
- hacky hacky */
- OUTREG(DSPABASE, pI830->FrontBuffer.Start + pI830->LinearAddr);
-
-}
-
-static Bool
-I830SetParam(const DRIDriverContext *ctx, int param, int value)
-{
- drmI830SetParam sp;
-
- memset(&sp, 0, sizeof(sp));
- sp.param = param;
- sp.value = value;
-
- if (drmCommandWrite(ctx->drmFD, DRM_I830_SETPARAM, &sp, sizeof(sp))) {
- fprintf(stderr, "I830 SetParam Failed\n");
- return FALSE;
- }
-
- return TRUE;
-}
-
-static Bool
-I830DRIMapScreenRegions(DRIDriverContext *ctx, I830Rec *pI830, drmI830Sarea *sarea)
-{
- fprintf(stderr,
- "[drm] Mapping front buffer\n");
-
- if (drmAddMap(ctx->drmFD,
- (drm_handle_t)(sarea->front_offset + pI830->LinearAddr),
- sarea->front_size,
- DRM_FRAME_BUFFER, /*DRM_AGP,*/
- 0,
- &sarea->front_handle) < 0) {
- fprintf(stderr,
- "[drm] drmAddMap(front_handle) failed. Disabling DRI\n");
- return FALSE;
- }
- ctx->shared.hFrameBuffer = sarea->front_handle;
- ctx->shared.fbSize = sarea->front_size;
- fprintf(stderr, "[drm] Front Buffer = 0x%08x\n",
- sarea->front_handle);
-
- if (drmAddMap(ctx->drmFD,
- (drm_handle_t)(sarea->back_offset),
- sarea->back_size, DRM_AGP, 0,
- &sarea->back_handle) < 0) {
- fprintf(stderr,
- "[drm] drmAddMap(back_handle) failed. Disabling DRI\n");
- return FALSE;
- }
- fprintf(stderr, "[drm] Back Buffer = 0x%08x\n",
- sarea->back_handle);
-
- if (drmAddMap(ctx->drmFD,
- (drm_handle_t)sarea->depth_offset,
- sarea->depth_size, DRM_AGP, 0,
- &sarea->depth_handle) < 0) {
- fprintf(stderr,
- "[drm] drmAddMap(depth_handle) failed. Disabling DRI\n");
- return FALSE;
- }
- fprintf(stderr, "[drm] Depth Buffer = 0x%08x\n",
- sarea->depth_handle);
-
-#if 0
- if (drmAddMap(ctx->drmFD,
- (drm_handle_t)sarea->tex_offset,
- sarea->tex_size, DRM_AGP, 0,
- &sarea->tex_handle) < 0) {
- fprintf(stderr,
- "[drm] drmAddMap(tex_handle) failed. Disabling DRI\n");
- return FALSE;
- }
- fprintf(stderr, "[drm] textures = 0x%08x\n",
- sarea->tex_handle);
-#endif
- return TRUE;
-}
-
-
-static void
-I830DRIUnmapScreenRegions(const DRIDriverContext *ctx, I830Rec *pI830, drmI830Sarea *sarea)
-{
-#if 1
- if (sarea->front_handle) {
- drmRmMap(ctx->drmFD, sarea->front_handle);
- sarea->front_handle = 0;
- }
-#endif
- if (sarea->back_handle) {
- drmRmMap(ctx->drmFD, sarea->back_handle);
- sarea->back_handle = 0;
- }
- if (sarea->depth_handle) {
- drmRmMap(ctx->drmFD, sarea->depth_handle);
- sarea->depth_handle = 0;
- }
- if (sarea->tex_handle) {
- drmRmMap(ctx->drmFD, sarea->tex_handle);
- sarea->tex_handle = 0;
- }
-}
-
-static Bool
-I830DRIDoMappings(DRIDriverContext *ctx, I830Rec *pI830, drmI830Sarea *sarea)
-{
- if (drmAddMap(ctx->drmFD,
- (drm_handle_t)pI830->LpRing->mem.Start,
- pI830->LpRing->mem.Size, DRM_AGP, 0,
- &pI830->ring_map) < 0) {
- fprintf(stderr,
- "[drm] drmAddMap(ring_map) failed. Disabling DRI\n");
- return FALSE;
- }
- fprintf(stderr, "[drm] ring buffer = 0x%08x\n",
- pI830->ring_map);
-
- if (I830InitDma(ctx, pI830) == FALSE) {
- return FALSE;
- }
-
- /* init to zero to be safe */
-
- I830DRIMapScreenRegions(ctx, pI830, sarea);
- SetupDRIMM(ctx, pI830);
-
- if (ctx->pciDevice != PCI_CHIP_845_G &&
- ctx->pciDevice != PCI_CHIP_I830_M) {
- I830SetParam(ctx, I830_SETPARAM_USE_MI_BATCHBUFFER_START, 1 );
- }
-
- /* Okay now initialize the dma engine */
- {
- pI830->irq = drmGetInterruptFromBusID(ctx->drmFD,
- ctx->pciBus,
- ctx->pciDevice,
- ctx->pciFunc);
-
- if (drmCtlInstHandler(ctx->drmFD, pI830->irq)) {
- fprintf(stderr,
- "[drm] failure adding irq handler\n");
- pI830->irq = 0;
- return FALSE;
- }
- else
- fprintf(stderr,
- "[drm] dma control initialized, using IRQ %d\n",
- pI830->irq);
- }
-
- fprintf(stderr, "[dri] visual configs initialized\n");
-
- return TRUE;
-}
-
-static Bool
-I830ClearScreen(DRIDriverContext *ctx, I830Rec *pI830, drmI830Sarea *sarea)
-{
- /* need to drmMap front and back buffers and zero them */
- drmAddress map_addr;
- int ret;
-
- ret = drmMap(ctx->drmFD,
- sarea->front_handle,
- sarea->front_size,
- &map_addr);
-
- if (ret)
- {
- fprintf(stderr, "Unable to map front buffer\n");
- return FALSE;
- }
-
- drimemsetio((char *)map_addr,
- 0,
- sarea->front_size);
- drmUnmap(map_addr, sarea->front_size);
-
-
- ret = drmMap(ctx->drmFD,
- sarea->back_handle,
- sarea->back_size,
- &map_addr);
-
- if (ret)
- {
- fprintf(stderr, "Unable to map back buffer\n");
- return FALSE;
- }
-
- drimemsetio((char *)map_addr,
- 0,
- sarea->back_size);
- drmUnmap(map_addr, sarea->back_size);
-
- return TRUE;
-}
-
-static Bool
-I830ScreenInit(DRIDriverContext *ctx, I830Rec *pI830)
-
-{
- I830DRIPtr pI830DRI;
- drmI830Sarea *pSAREAPriv;
- int err;
-
- drm_page_size = getpagesize();
-
- pI830->registerSize = ctx->MMIOSize;
- /* This is a hack for now. We have to have more than a 4k page here
- * because of the size of the state. However, the state should be
- * in a per-context mapping. This will be added in the Mesa 3.5 port
- * of the I830 driver.
- */
- ctx->shared.SAREASize = SAREA_MAX;
-
- /* Note that drmOpen will try to load the kernel module, if needed. */
- ctx->drmFD = drmOpen("i915", NULL );
- if (ctx->drmFD < 0) {
- fprintf(stderr, "[drm] drmOpen failed\n");
- return 0;
- }
-
- if ((err = drmSetBusid(ctx->drmFD, ctx->pciBusID)) < 0) {
- fprintf(stderr, "[drm] drmSetBusid failed (%d, %s), %s\n",
- ctx->drmFD, ctx->pciBusID, strerror(-err));
- return 0;
- }
-
- if (drmAddMap( ctx->drmFD,
- 0,
- ctx->shared.SAREASize,
- DRM_SHM,
- DRM_CONTAINS_LOCK,
- &ctx->shared.hSAREA) < 0)
- {
- fprintf(stderr, "[drm] drmAddMap failed\n");
- return 0;
- }
-
- fprintf(stderr, "[drm] added %d byte SAREA at 0x%08x\n",
- ctx->shared.SAREASize, ctx->shared.hSAREA);
-
- if (drmMap( ctx->drmFD,
- ctx->shared.hSAREA,
- ctx->shared.SAREASize,
- (drmAddressPtr)(&ctx->pSAREA)) < 0)
- {
- fprintf(stderr, "[drm] drmMap failed\n");
- return 0;
-
- }
-
- memset(ctx->pSAREA, 0, ctx->shared.SAREASize);
- fprintf(stderr, "[drm] mapped SAREA 0x%08x to %p, size %d\n",
- ctx->shared.hSAREA, ctx->pSAREA, ctx->shared.SAREASize);
-
-
- if (drmAddMap(ctx->drmFD,
- ctx->MMIOStart,
- ctx->MMIOSize,
- DRM_REGISTERS,
- DRM_READ_ONLY,
- &pI830->registerHandle) < 0) {
- fprintf(stderr, "[drm] drmAddMap mmio failed\n");
- return 0;
- }
- fprintf(stderr,
- "[drm] register handle = 0x%08x\n", pI830->registerHandle);
-
-
- if (!I830CheckDRMVersion(ctx, pI830)) {
- return FALSE;
- }
-
- /* Create a 'server' context so we can grab the lock for
- * initialization ioctls.
- */
- if ((err = drmCreateContext(ctx->drmFD, &ctx->serverContext)) != 0) {
- fprintf(stderr, "%s: drmCreateContext failed %d\n", __FUNCTION__, err);
- return 0;
- }
-
- DRM_LOCK(ctx->drmFD, ctx->pSAREA, ctx->serverContext, 0);
-
- /* Initialize the SAREA private data structure */
- pSAREAPriv = (drmI830Sarea *)(((char*)ctx->pSAREA) +
- sizeof(drm_sarea_t));
- memset(pSAREAPriv, 0, sizeof(*pSAREAPriv));
-
- pI830->StolenMemory.Size = I830DetectMemory(ctx, pI830);
- pI830->StolenMemory.Start = 0;
- pI830->StolenMemory.End = pI830->StolenMemory.Size;
-
- pI830->MemoryAperture.Start = pI830->StolenMemory.End;
- pI830->MemoryAperture.End = KB(40000);
- pI830->MemoryAperture.Size = pI830->MemoryAperture.End - pI830->MemoryAperture.Start;
-
- pI830->StolenPool.Fixed = pI830->StolenMemory;
- pI830->StolenPool.Total = pI830->StolenMemory;
- pI830->StolenPool.Free = pI830->StolenPool.Total;
- pI830->FreeMemory = pI830->StolenPool.Total.Size;
-
- if (!AgpInit(ctx, pI830))
- return FALSE;
-
- if (I830AllocateMemory(ctx, pI830) == FALSE)
- {
- return FALSE;
- }
-
- if (I830BindMemory(ctx, pI830) == FALSE)
- {
- return FALSE;
- }
-
- pSAREAPriv->rotated_offset = -1;
- pSAREAPriv->rotated_size = 0;
- pSAREAPriv->rotated_pitch = ctx->shared.virtualWidth;
-
- pSAREAPriv->front_offset = pI830->FrontBuffer.Start;
- pSAREAPriv->front_size = pI830->FrontBuffer.Size;
- pSAREAPriv->width = ctx->shared.virtualWidth;
- pSAREAPriv->height = ctx->shared.virtualHeight;
- pSAREAPriv->pitch = ctx->shared.virtualWidth;
- pSAREAPriv->virtualX = ctx->shared.virtualWidth;
- pSAREAPriv->virtualY = ctx->shared.virtualHeight;
- pSAREAPriv->back_offset = pI830->BackBuffer.Start;
- pSAREAPriv->back_size = pI830->BackBuffer.Size;
- pSAREAPriv->depth_offset = pI830->DepthBuffer.Start;
- pSAREAPriv->depth_size = pI830->DepthBuffer.Size;
-#if 0
- pSAREAPriv->tex_offset = pI830->TexMem.Start;
- pSAREAPriv->tex_size = pI830->TexMem.Size;
-#endif
- pSAREAPriv->log_tex_granularity = pI830->TexGranularity;
-
- ctx->driverClientMsg = malloc(sizeof(I830DRIRec));
- ctx->driverClientMsgSize = sizeof(I830DRIRec);
- pI830DRI = (I830DRIPtr)ctx->driverClientMsg;
- pI830DRI->deviceID = pI830->Chipset;
- pI830DRI->regsSize = I830_REG_SIZE;
- pI830DRI->width = ctx->shared.virtualWidth;
- pI830DRI->height = ctx->shared.virtualHeight;
- pI830DRI->mem = ctx->shared.fbSize;
- pI830DRI->cpp = ctx->cpp;
-
- pI830DRI->bitsPerPixel = ctx->bpp;
- pI830DRI->sarea_priv_offset = sizeof(drm_sarea_t);
-
- err = I830DRIDoMappings(ctx, pI830, pSAREAPriv);
- if (err == FALSE)
- return FALSE;
-
- I830SetupMemoryTiling(ctx, pI830);
-
- /* Quick hack to clear the front & back buffers. Could also use
- * the clear ioctl to do this, but would need to setup hw state
- * first.
- */
- I830ClearScreen(ctx, pI830, pSAREAPriv);
-
- I830SetRingRegs(ctx, pI830);
-
- return TRUE;
-}
-
-
-/**
- * \brief Validate the fbdev mode.
- *
- * \param ctx display handle.
- *
- * \return one on success, or zero on failure.
- *
- * Saves some registers and returns 1.
- *
- * \sa radeonValidateMode().
- */
-static int i830ValidateMode( const DRIDriverContext *ctx )
-{
- return 1;
-}
-
-/**
- * \brief Examine mode returned by fbdev.
- *
- * \param ctx display handle.
- *
- * \return one on success, or zero on failure.
- *
- * Restores registers that fbdev has clobbered and returns 1.
- *
- * \sa i810ValidateMode().
- */
-static int i830PostValidateMode( const DRIDriverContext *ctx )
-{
- I830Rec *pI830 = ctx->driverPrivate;
-
- I830SetRingRegs(ctx, pI830);
- return 1;
-}
-
-
-/**
- * \brief Initialize the framebuffer device mode
- *
- * \param ctx display handle.
- *
- * \return one on success, or zero on failure.
- *
- * Fills in \p info with some default values and some information from \p ctx
- * and then calls I810ScreenInit() for the screen initialization.
- *
- * Before exiting clears the framebuffer memory accessing it directly.
- */
-static int i830InitFBDev( DRIDriverContext *ctx )
-{
- I830Rec *pI830 = calloc(1, sizeof(I830Rec));
- int i;
-
- {
- int dummy = ctx->shared.virtualWidth;
-
- switch (ctx->bpp / 8) {
- case 1: dummy = (ctx->shared.virtualWidth + 127) & ~127; break;
- case 2: dummy = (ctx->shared.virtualWidth + 31) & ~31; break;
- case 3:
- case 4: dummy = (ctx->shared.virtualWidth + 15) & ~15; break;
- }
-
- ctx->shared.virtualWidth = dummy;
- ctx->shared.Width = ctx->shared.virtualWidth;
- }
-
-
- for (i = 0; pitches[i] != 0; i++) {
- if (pitches[i] >= ctx->shared.virtualWidth) {
- ctx->shared.virtualWidth = pitches[i];
- break;
- }
- }
-
- ctx->driverPrivate = (void *)pI830;
-
- pI830->LpRing = calloc(1, sizeof(I830RingBuffer));
- pI830->Chipset = ctx->chipset;
- pI830->LinearAddr = ctx->FBStart;
-
- if (!I830ScreenInit( ctx, pI830 ))
- return 0;
-
-
- return 1;
-}
-
-
-/**
- * \brief The screen is being closed, so clean up any state and free any
- * resources used by the DRI.
- *
- * \param ctx display handle.
- *
- * Unmaps the SAREA, closes the DRM device file descriptor and frees the driver
- * private data.
- */
-static void i830HaltFBDev( DRIDriverContext *ctx )
-{
- drmI830Sarea *pSAREAPriv;
- I830Rec *pI830 = ctx->driverPrivate;
-
- if (pI830->irq) {
- drmCtlUninstHandler(ctx->drmFD);
- pI830->irq = 0; }
-
- I830CleanupDma(ctx);
-
- pSAREAPriv = (drmI830Sarea *)(((char*)ctx->pSAREA) +
- sizeof(drm_sarea_t));
-
- I830DRIUnmapScreenRegions(ctx, pI830, pSAREAPriv);
- drmUnmap( ctx->pSAREA, ctx->shared.SAREASize );
- drmClose(ctx->drmFD);
-
- if (ctx->driverPrivate) {
- free(ctx->driverPrivate);
- ctx->driverPrivate = 0;
- }
-}
-
-
-extern void i810NotifyFocus( int );
-
-/**
- * \brief Exported driver interface for Mini GLX.
- *
- * \sa DRIDriverRec.
- */
-const struct DRIDriverRec __driDriver = {
- i830ValidateMode,
- i830PostValidateMode,
- i830InitFBDev,
- i830HaltFBDev,
- NULL,//I830EngineShutdown,
- NULL, //I830EngineRestore,
-#ifndef _EMBEDDED
- 0,
-#else
- i810NotifyFocus,
-#endif
-};
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
- */
-
-#include <xf86drm.h>
-#include <stdlib.h>
-#include "glthread.h"
-#include "errno.h"
-#include "ws_dri_bufmgr.h"
-#include "string.h"
-#include "imports.h"
-#include "ws_dri_bufpool.h"
-#include "ws_dri_fencemgr.h"
-
-/*
- * This lock is here to protect drmBO structs changing underneath us during a
- * validate list call, since validatelist cannot take individual locks for
- * each drmBO. Validatelist takes this lock in write mode. Any access to an
- * individual drmBO should take this lock in read mode, since in that case, the
- * driBufferObject mutex will protect the access. Locking order is
- * driBufferObject mutex -> this rw lock.
- */
-
-_glthread_DECLARE_STATIC_MUTEX(bmMutex);
-_glthread_DECLARE_STATIC_COND(bmCond);
-
-static int kernelReaders = 0;
-static int num_buffers = 0;
-static int num_user_buffers = 0;
-
-static drmBO *drmBOListBuf(void *iterator)
-{
- drmBONode *node;
- drmMMListHead *l = (drmMMListHead *) iterator;
- node = DRMLISTENTRY(drmBONode, l, head);
- return node->buf;
-}
-
-static void *drmBOListIterator(drmBOList *list)
-{
- void *ret = list->list.next;
-
- if (ret == &list->list)
- return NULL;
- return ret;
-}
-
-static void *drmBOListNext(drmBOList *list, void *iterator)
-{
- void *ret;
-
- drmMMListHead *l = (drmMMListHead *) iterator;
- ret = l->next;
- if (ret == &list->list)
- return NULL;
- return ret;
-}
-
-static drmBONode *drmAddListItem(drmBOList *list, drmBO *item,
- uint64_t arg0,
- uint64_t arg1)
-{
- drmBONode *node;
- drmMMListHead *l;
-
- l = list->free.next;
- if (l == &list->free) {
- node = (drmBONode *) malloc(sizeof(*node));
- if (!node) {
- return NULL;
- }
- list->numCurrent++;
- }
- else {
- DRMLISTDEL(l);
- node = DRMLISTENTRY(drmBONode, l, head);
- }
- node->buf = item;
- node->arg0 = arg0;
- node->arg1 = arg1;
- DRMLISTADD(&node->head, &list->list);
- list->numOnList++;
- return node;
-}
-
-static int drmAddValidateItem(drmBOList *list, drmBO *buf, uint64_t flags,
- uint64_t mask, int *newItem)
-{
- drmBONode *node, *cur;
- drmMMListHead *l;
-
- *newItem = 0;
- cur = NULL;
-
- for (l = list->list.next; l != &list->list; l = l->next) {
- node = DRMLISTENTRY(drmBONode, l, head);
- if (node->buf == buf) {
- cur = node;
- break;
- }
- }
- if (!cur) {
- cur = drmAddListItem(list, buf, flags, mask);
- if (!cur) {
- return -ENOMEM;
- }
- *newItem = 1;
- cur->arg0 = flags;
- cur->arg1 = mask;
- }
- else {
- uint64_t memFlags = cur->arg0 & flags & DRM_BO_MASK_MEM;
- uint64_t accFlags = (cur->arg0 | flags) & ~DRM_BO_MASK_MEM;
-
- if (mask & cur->arg1 & ~DRM_BO_MASK_MEM & (cur->arg0 ^ flags)) {
- return -EINVAL;
- }
-
- cur->arg1 |= mask;
- cur->arg0 = (cur->arg0 & ~mask) | ((memFlags | accFlags) & mask);
-
- if (((cur->arg1 & DRM_BO_MASK_MEM) != 0) &&
- (cur->arg0 & DRM_BO_MASK_MEM) == 0) {
- return -EINVAL;
- }
- }
- return 0;
-}
-
-static void drmBOFreeList(drmBOList *list)
-{
- drmBONode *node;
- drmMMListHead *l;
-
- l = list->list.next;
- while(l != &list->list) {
- DRMLISTDEL(l);
- node = DRMLISTENTRY(drmBONode, l, head);
- free(node);
- l = list->list.next;
- list->numCurrent--;
- list->numOnList--;
- }
-
- l = list->free.next;
- while(l != &list->free) {
- DRMLISTDEL(l);
- node = DRMLISTENTRY(drmBONode, l, head);
- free(node);
- l = list->free.next;
- list->numCurrent--;
- }
-}
-
-static int drmAdjustListNodes(drmBOList *list)
-{
- drmBONode *node;
- drmMMListHead *l;
- int ret = 0;
-
- while(list->numCurrent < list->numTarget) {
- node = (drmBONode *) malloc(sizeof(*node));
- if (!node) {
- ret = -ENOMEM;
- break;
- }
- list->numCurrent++;
- DRMLISTADD(&node->head, &list->free);
- }
-
- while(list->numCurrent > list->numTarget) {
- l = list->free.next;
- if (l == &list->free)
- break;
- DRMLISTDEL(l);
- node = DRMLISTENTRY(drmBONode, l, head);
- free(node);
- list->numCurrent--;
- }
- return ret;
-}
-
-static int drmBOCreateList(int numTarget, drmBOList *list)
-{
- DRMINITLISTHEAD(&list->list);
- DRMINITLISTHEAD(&list->free);
- list->numTarget = numTarget;
- list->numCurrent = 0;
- list->numOnList = 0;
- return drmAdjustListNodes(list);
-}
-
-static int drmBOResetList(drmBOList *list)
-{
- drmMMListHead *l;
- int ret;
-
- ret = drmAdjustListNodes(list);
- if (ret)
- return ret;
-
- l = list->list.next;
- while (l != &list->list) {
- DRMLISTDEL(l);
- DRMLISTADD(l, &list->free);
- list->numOnList--;
- l = list->list.next;
- }
- return drmAdjustListNodes(list);
-}
-
-void driWriteLockKernelBO(void)
-{
- _glthread_LOCK_MUTEX(bmMutex);
- while(kernelReaders != 0)
- _glthread_COND_WAIT(bmCond, bmMutex);
-}
-
-void driWriteUnlockKernelBO(void)
-{
- _glthread_UNLOCK_MUTEX(bmMutex);
-}
-
-void driReadLockKernelBO(void)
-{
- _glthread_LOCK_MUTEX(bmMutex);
- kernelReaders++;
- _glthread_UNLOCK_MUTEX(bmMutex);
-}
-
-void driReadUnlockKernelBO(void)
-{
- _glthread_LOCK_MUTEX(bmMutex);
- if (--kernelReaders == 0)
- _glthread_COND_BROADCAST(bmCond);
- _glthread_UNLOCK_MUTEX(bmMutex);
-}
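
A minimal usage sketch of the helpers above, with hypothetical caller names (the real users, such as driBOKernel() and driGetdrmBOList(), appear later in this file); it assumes the snippet lives in ws_dri_bufmgr.c so the helpers are visible:

    /* Illustration only: how the read and write sides are meant to pair up. */
    static void example_touch_single_bo(void)
    {
        driReadLockKernelBO();     /* readers may run concurrently           */
        /* ... inspect or update one drmBO here ... */
        driReadUnlockKernelBO();   /* last reader out wakes a waiting writer */
    }

    static void example_validate_list(void)
    {
        driWriteLockKernelBO();    /* blocks until kernelReaders drops to 0  */
        /* ... run the validate-list call, which may touch every drmBO ... */
        driWriteUnlockKernelBO();
    }
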
-
-
-
-
-/*
- * TODO: Introduce fence pools in the same way as
- * buffer object pools.
- */
-
-typedef struct _DriBufferObject
-{
- DriBufferPool *pool;
- _glthread_Mutex mutex;
- int refCount;
- const char *name;
- uint64_t flags;
- unsigned hint;
- unsigned alignment;
- unsigned createdByReference;
- void *private;
- /* user-space buffer: */
- unsigned userBuffer;
- void *userData;
- unsigned userSize;
-} DriBufferObject;
-
-typedef struct _DriBufferList {
- drmBOList drmBuffers; /* List of kernel buffers needing validation */
- drmBOList driBuffers; /* List of user-space buffers needing validation */
-} DriBufferList;
-
-
-void
-bmError(int val, const char *file, const char *function, int line)
-{
- _mesa_printf("Fatal video memory manager error \"%s\".\n"
- "Check kernel logs or set the LIBGL_DEBUG\n"
- "environment variable to \"verbose\" for more info.\n"
- "Detected in file %s, line %d, function %s.\n",
- strerror(-val), file, line, function);
-#ifndef NDEBUG
- abort();
-#else
- abort();
-#endif
-}
-
-extern drmBO *
-driBOKernel(struct _DriBufferObject *buf)
-{
- drmBO *ret;
-
- driReadLockKernelBO();
- _glthread_LOCK_MUTEX(buf->mutex);
- assert(buf->private != NULL);
- ret = buf->pool->kernel(buf->pool, buf->private);
- if (!ret)
- BM_CKFATAL(-EINVAL);
- _glthread_UNLOCK_MUTEX(buf->mutex);
- driReadUnlockKernelBO();
-
- return ret;
-}
-
-void
-driBOWaitIdle(struct _DriBufferObject *buf, int lazy)
-{
-
- /*
- * This function may block. Is it sane to keep the mutex held during
- * that time??
- */
-
- _glthread_LOCK_MUTEX(buf->mutex);
- BM_CKFATAL(buf->pool->waitIdle(buf->pool, buf->private, &buf->mutex, lazy));
- _glthread_UNLOCK_MUTEX(buf->mutex);
-}
-
-void *
-driBOMap(struct _DriBufferObject *buf, unsigned flags, unsigned hint)
-{
- void *virtual;
- int retval;
-
- if (buf->userBuffer) {
- return buf->userData;
- }
-
- _glthread_LOCK_MUTEX(buf->mutex);
- assert(buf->private != NULL);
- retval = buf->pool->map(buf->pool, buf->private, flags, hint,
- &buf->mutex, &virtual);
- _glthread_UNLOCK_MUTEX(buf->mutex);
-
- return retval == 0 ? virtual : NULL;
-}
-
-void
-driBOUnmap(struct _DriBufferObject *buf)
-{
- if (buf->userBuffer)
- return;
-
- assert(buf->private != NULL);
- _glthread_LOCK_MUTEX(buf->mutex);
- BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private));
- _glthread_UNLOCK_MUTEX(buf->mutex);
-}
-
-unsigned long
-driBOOffset(struct _DriBufferObject *buf)
-{
- unsigned long ret;
-
- assert(buf->private != NULL);
-
- _glthread_LOCK_MUTEX(buf->mutex);
- ret = buf->pool->offset(buf->pool, buf->private);
- _glthread_UNLOCK_MUTEX(buf->mutex);
- return ret;
-}
-
-unsigned long
-driBOPoolOffset(struct _DriBufferObject *buf)
-{
- unsigned long ret;
-
- assert(buf->private != NULL);
-
- _glthread_LOCK_MUTEX(buf->mutex);
- ret = buf->pool->poolOffset(buf->pool, buf->private);
- _glthread_UNLOCK_MUTEX(buf->mutex);
- return ret;
-}
-
-uint64_t
-driBOFlags(struct _DriBufferObject *buf)
-{
- uint64_t ret;
-
- assert(buf->private != NULL);
-
- driReadLockKernelBO();
- _glthread_LOCK_MUTEX(buf->mutex);
- ret = buf->pool->flags(buf->pool, buf->private);
- _glthread_UNLOCK_MUTEX(buf->mutex);
- driReadUnlockKernelBO();
- return ret;
-}
-
-struct _DriBufferObject *
-driBOReference(struct _DriBufferObject *buf)
-{
- _glthread_LOCK_MUTEX(buf->mutex);
- if (++buf->refCount == 1) {
- _glthread_UNLOCK_MUTEX(buf->mutex);
- BM_CKFATAL(-EINVAL);
- }
- _glthread_UNLOCK_MUTEX(buf->mutex);
- return buf;
-}
-
-void
-driBOUnReference(struct _DriBufferObject *buf)
-{
- int tmp;
-
- if (!buf)
- return;
-
- _glthread_LOCK_MUTEX(buf->mutex);
- tmp = --buf->refCount;
- if (!tmp) {
- _glthread_UNLOCK_MUTEX(buf->mutex);
- if (buf->private) {
- if (buf->createdByReference)
- buf->pool->unreference(buf->pool, buf->private);
- else
- buf->pool->destroy(buf->pool, buf->private);
- }
- if (buf->userBuffer)
- num_user_buffers--;
- else
- num_buffers--;
- free(buf);
- } else
- _glthread_UNLOCK_MUTEX(buf->mutex);
-
-}
-
-
-int
-driBOData(struct _DriBufferObject *buf,
- unsigned size, const void *data,
- DriBufferPool *newPool,
- uint64_t flags)
-{
- void *virtual = NULL;
- int newBuffer;
- int retval = 0;
- struct _DriBufferPool *pool;
-
- assert(!buf->userBuffer); /* XXX just do a memcpy? */
-
- _glthread_LOCK_MUTEX(buf->mutex);
- pool = buf->pool;
-
- if (pool == NULL && newPool != NULL) {
- buf->pool = newPool;
- pool = newPool;
- }
- if (newPool == NULL)
- newPool = pool;
-
- if (!pool->create) {
- _mesa_error(NULL, GL_INVALID_OPERATION,
- "driBOData called on invalid buffer\n");
- BM_CKFATAL(-EINVAL);
- }
-
- newBuffer = (!buf->private || pool != newPool ||
- pool->size(pool, buf->private) < size);
-
- if (!flags)
- flags = buf->flags;
-
- if (newBuffer) {
-
- if (buf->createdByReference) {
- _mesa_error(NULL, GL_INVALID_OPERATION,
- "driBOData requiring resizing called on "
- "shared buffer.\n");
- BM_CKFATAL(-EINVAL);
- }
-
- if (buf->private)
- buf->pool->destroy(buf->pool, buf->private);
-
- pool = newPool;
- buf->pool = newPool;
- buf->private = pool->create(pool, size, flags, DRM_BO_HINT_DONT_FENCE,
- buf->alignment);
- if (!buf->private)
- retval = -ENOMEM;
-
- if (retval == 0)
- retval = pool->map(pool, buf->private,
- DRM_BO_FLAG_WRITE,
- DRM_BO_HINT_DONT_BLOCK, &buf->mutex, &virtual);
- } else if (pool->map(pool, buf->private, DRM_BO_FLAG_WRITE,
- DRM_BO_HINT_DONT_BLOCK, &buf->mutex, &virtual)) {
- /*
- * Buffer is busy. need to create a new one.
- */
-
- void *newBuf;
-
- newBuf = pool->create(pool, size, flags, DRM_BO_HINT_DONT_FENCE,
- buf->alignment);
- if (newBuf) {
- buf->pool->destroy(buf->pool, buf->private);
- buf->private = newBuf;
- }
-
- retval = pool->map(pool, buf->private,
- DRM_BO_FLAG_WRITE, 0, &buf->mutex, &virtual);
- } else {
- uint64_t flag_diff = flags ^ buf->flags;
-
- /*
- * We might need to change buffer flags.
- */
-
- if (flag_diff){
- assert(pool->setStatus != NULL);
- BM_CKFATAL(pool->unmap(pool, buf->private));
- BM_CKFATAL(pool->setStatus(pool, buf->private, flag_diff,
- buf->flags));
- if (!data)
- goto out;
-
- retval = pool->map(pool, buf->private,
- DRM_BO_FLAG_WRITE, 0, &buf->mutex, &virtual);
- }
- }
-
- if (retval == 0) {
- if (data)
- memcpy(virtual, data, size);
-
- BM_CKFATAL(pool->unmap(pool, buf->private));
- }
-
- out:
- _glthread_UNLOCK_MUTEX(buf->mutex);
-
- return retval;
-}
-
-void
-driBOSubData(struct _DriBufferObject *buf,
- unsigned long offset, unsigned long size, const void *data)
-{
- void *virtual;
-
- assert(!buf->userBuffer); /* XXX just do a memcpy? */
-
- _glthread_LOCK_MUTEX(buf->mutex);
- if (size && data) {
- BM_CKFATAL(buf->pool->map(buf->pool, buf->private,
- DRM_BO_FLAG_WRITE, 0, &buf->mutex,
- &virtual));
- memcpy((unsigned char *) virtual + offset, data, size);
- BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private));
- }
- _glthread_UNLOCK_MUTEX(buf->mutex);
-}
-
-void
-driBOGetSubData(struct _DriBufferObject *buf,
- unsigned long offset, unsigned long size, void *data)
-{
- void *virtual;
-
- assert(!buf->userBuffer); /* XXX just do a memcpy? */
-
- _glthread_LOCK_MUTEX(buf->mutex);
- if (size && data) {
- BM_CKFATAL(buf->pool->map(buf->pool, buf->private,
- DRM_BO_FLAG_READ, 0, &buf->mutex, &virtual));
- memcpy(data, (unsigned char *) virtual + offset, size);
- BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private));
- }
- _glthread_UNLOCK_MUTEX(buf->mutex);
-}
-
-void
-driBOSetReferenced(struct _DriBufferObject *buf,
- unsigned long handle)
-{
- _glthread_LOCK_MUTEX(buf->mutex);
- if (buf->private != NULL) {
- _mesa_error(NULL, GL_INVALID_OPERATION,
- "Invalid buffer for setReferenced\n");
- BM_CKFATAL(-EINVAL);
-
- }
- if (buf->pool->reference == NULL) {
- _mesa_error(NULL, GL_INVALID_OPERATION,
- "Invalid buffer pool for setReferenced\n");
- BM_CKFATAL(-EINVAL);
- }
- buf->private = buf->pool->reference(buf->pool, handle);
- if (!buf->private) {
- _mesa_error(NULL, GL_OUT_OF_MEMORY,
- "Invalid buffer pool for setStatic\n");
- BM_CKFATAL(-ENOMEM);
- }
- buf->createdByReference = GL_TRUE;
- buf->flags = buf->pool->kernel(buf->pool, buf->private)->flags;
- _glthread_UNLOCK_MUTEX(buf->mutex);
-}
-
-int
-driGenBuffers(struct _DriBufferPool *pool,
- const char *name,
- unsigned n,
- struct _DriBufferObject *buffers[],
- unsigned alignment, uint64_t flags, unsigned hint)
-{
- struct _DriBufferObject *buf;
- int i;
-
- flags = (flags) ? flags : DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM |
- DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE;
-
- ++num_buffers;
-
- assert(pool);
-
- for (i = 0; i < n; ++i) {
- buf = (struct _DriBufferObject *) calloc(1, sizeof(*buf));
- if (!buf)
- return -ENOMEM;
-
- _glthread_INIT_MUTEX(buf->mutex);
- _glthread_LOCK_MUTEX(buf->mutex);
- buf->refCount = 1;
- buf->flags = flags;
- buf->hint = hint;
- buf->name = name;
- buf->alignment = alignment;
- buf->pool = pool;
- buf->createdByReference = 0;
- _glthread_UNLOCK_MUTEX(buf->mutex);
- buffers[i] = buf;
- }
- return 0;
-}
-
-void
-driGenUserBuffer(struct _DriBufferPool *pool,
- const char *name,
- struct _DriBufferObject **buffers,
- void *ptr, unsigned bytes)
-{
- const unsigned alignment = 1, flags = 0, hint = 0;
-
- --num_buffers; /* JB: is incremented in driGenBuffers */
- driGenBuffers(pool, name, 1, buffers, alignment, flags, hint);
- ++num_user_buffers;
-
- (*buffers)->userBuffer = 1;
- (*buffers)->userData = ptr;
- (*buffers)->userSize = bytes;
-}
-
-void
-driDeleteBuffers(unsigned n, struct _DriBufferObject *buffers[])
-{
- int i;
-
- for (i = 0; i < n; ++i) {
- driBOUnReference(buffers[i]);
- }
-}
-
-
-void
-driInitBufMgr(int fd)
-{
- ;
-}
-
-/*
- * Note that lists are per-context and don't need mutex protection.
- */
-
-struct _DriBufferList *
-driBOCreateList(int target)
-{
- struct _DriBufferList *list = calloc(sizeof(*list), 1);
-
- BM_CKFATAL(drmBOCreateList(target, &list->drmBuffers));
- BM_CKFATAL(drmBOCreateList(target, &list->driBuffers));
- return list;
-}
-
-int
-driBOResetList(struct _DriBufferList * list)
-{
- int ret;
- ret = drmBOResetList(&list->drmBuffers);
- if (ret)
- return ret;
- ret = drmBOResetList(&list->driBuffers);
- return ret;
-}
-
-void
-driBOFreeList(struct _DriBufferList * list)
-{
- drmBOFreeList(&list->drmBuffers);
- drmBOFreeList(&list->driBuffers);
- free(list);
-}
-
-
-/*
- * Copied from libdrm, because it is needed by driAddValidateItem.
- */
-
-static drmBONode *
-driAddListItem(drmBOList * list, drmBO * item,
- uint64_t arg0, uint64_t arg1)
-{
- drmBONode *node;
- drmMMListHead *l;
-
- l = list->free.next;
- if (l == &list->free) {
- node = (drmBONode *) malloc(sizeof(*node));
- if (!node) {
- return NULL;
- }
- list->numCurrent++;
- } else {
- DRMLISTDEL(l);
- node = DRMLISTENTRY(drmBONode, l, head);
- }
- memset(&node->bo_arg, 0, sizeof(node->bo_arg));
- node->buf = item;
- node->arg0 = arg0;
- node->arg1 = arg1;
- DRMLISTADDTAIL(&node->head, &list->list);
- list->numOnList++;
- return node;
-}
-
-/*
- * Slightly modified version compared to the libdrm version.
- * This one returns the list index of the buffer put on the list.
- */
-
-static int
-driAddValidateItem(drmBOList * list, drmBO * buf, uint64_t flags,
- uint64_t mask, int *itemLoc,
- struct _drmBONode **pnode)
-{
- drmBONode *node, *cur;
- drmMMListHead *l;
- int count = 0;
-
- cur = NULL;
-
- for (l = list->list.next; l != &list->list; l = l->next) {
- node = DRMLISTENTRY(drmBONode, l, head);
- if (node->buf == buf) {
- cur = node;
- break;
- }
- count++;
- }
- if (!cur) {
- cur = driAddListItem(list, buf, flags, mask);
- if (!cur)
- return -ENOMEM;
-
- cur->arg0 = flags;
- cur->arg1 = mask;
- } else {
- uint64_t memFlags = cur->arg0 & flags & DRM_BO_MASK_MEM;
- uint64_t accFlags = (cur->arg0 | flags) & ~DRM_BO_MASK_MEM;
-
- if (mask & cur->arg1 & ~DRM_BO_MASK_MEM & (cur->arg0 ^ flags)) {
- return -EINVAL;
- }
-
- cur->arg1 |= mask;
- cur->arg0 = (cur->arg0 & ~mask) | ((memFlags | accFlags) & mask);
-
- if (((cur->arg1 & DRM_BO_MASK_MEM) != 0) &&
- (cur->arg0 & DRM_BO_MASK_MEM) == 0) {
- return -EINVAL;
- }
- }
- *itemLoc = count;
- *pnode = cur;
- return 0;
-}
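
/*
 * Illustrative example of the merge above (hedged, made-up values): if a
 * buffer is first added with flags = DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ
 * and mask = DRM_BO_MASK_MEM | DRM_BO_FLAG_READ, and later added again with
 * flags = DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_WRITE and
 * mask = DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE, then memFlags reduces to
 * DRM_BO_FLAG_MEM_TT (the intersection of the memory flags), accFlags
 * becomes READ | WRITE, and the node ends up with
 * arg0 = DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE and
 * arg1 = DRM_BO_MASK_MEM | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE. A
 * non-memory flag that differs between the two requests while required by
 * both masks makes the function return -EINVAL instead.
 */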
-
-
-void
-driBOAddListItem(struct _DriBufferList * list, struct _DriBufferObject *buf,
- uint64_t flags, uint64_t mask, int *itemLoc,
- struct _drmBONode **node)
-{
- int newItem;
-
- _glthread_LOCK_MUTEX(buf->mutex);
- BM_CKFATAL(driAddValidateItem(&list->drmBuffers,
- buf->pool->kernel(buf->pool, buf->private),
- flags, mask, itemLoc, node));
- BM_CKFATAL(drmAddValidateItem(&list->driBuffers, (drmBO *) buf,
- flags, mask, &newItem));
- if (newItem)
- buf->refCount++;
-
- _glthread_UNLOCK_MUTEX(buf->mutex);
-}
-
-drmBOList *driGetdrmBOList(struct _DriBufferList *list)
-{
- driWriteLockKernelBO();
- return &list->drmBuffers;
-}
-
-void driPutdrmBOList(struct _DriBufferList *list)
-{
- driWriteUnlockKernelBO();
-}
-
-
-void
-driBOFence(struct _DriBufferObject *buf, struct _DriFenceObject *fence)
-{
- _glthread_LOCK_MUTEX(buf->mutex);
- if (buf->pool->fence)
- BM_CKFATAL(buf->pool->fence(buf->pool, buf->private, fence));
- _glthread_UNLOCK_MUTEX(buf->mutex);
-
-}
-
-void
-driBOUnrefUserList(struct _DriBufferList *list)
-{
- struct _DriBufferObject *buf;
- void *curBuf;
-
- curBuf = drmBOListIterator(&list->driBuffers);
- while (curBuf) {
- buf = (struct _DriBufferObject *)drmBOListBuf(curBuf);
- driBOUnReference(buf);
- curBuf = drmBOListNext(&list->driBuffers, curBuf);
- }
-}
-
-struct _DriFenceObject *
-driBOFenceUserList(struct _DriFenceMgr *mgr,
- struct _DriBufferList *list, const char *name,
- drmFence *kFence)
-{
- struct _DriFenceObject *fence;
- struct _DriBufferObject *buf;
- void *curBuf;
-
- fence = driFenceCreate(mgr, kFence->fence_class, kFence->type,
- kFence, sizeof(*kFence));
- curBuf = drmBOListIterator(&list->driBuffers);
-
- /*
- * User-space fencing callbacks.
- */
-
- while (curBuf) {
- buf = (struct _DriBufferObject *) drmBOListBuf(curBuf);
- driBOFence(buf, fence);
- driBOUnReference(buf);
- curBuf = drmBOListNext(&list->driBuffers, curBuf);
- }
-
- driBOResetList(list);
- return fence;
-}
-
-void
-driBOValidateUserList(struct _DriBufferList * list)
-{
- void *curBuf;
- struct _DriBufferObject *buf;
-
- curBuf = drmBOListIterator(&list->driBuffers);
-
- /*
- * User-space validation callbacks.
- */
-
- while (curBuf) {
- buf = (struct _DriBufferObject *) drmBOListBuf(curBuf);
- _glthread_LOCK_MUTEX(buf->mutex);
- if (buf->pool->validate)
- BM_CKFATAL(buf->pool->validate(buf->pool, buf->private, &buf->mutex));
- _glthread_UNLOCK_MUTEX(buf->mutex);
- curBuf = drmBOListNext(&list->driBuffers, curBuf);
- }
-}
-
-
-void
-driPoolTakeDown(struct _DriBufferPool *pool)
-{
- pool->takeDown(pool);
-
-}
-
-unsigned long
-driBOSize(struct _DriBufferObject *buf)
-{
- unsigned long size;
-
- _glthread_LOCK_MUTEX(buf->mutex);
- size = buf->pool->size(buf->pool, buf->private);
- _glthread_UNLOCK_MUTEX(buf->mutex);
-
- return size;
-
-}
-
-drmBOList *driBOGetDRMBuffers(struct _DriBufferList *list)
-{
- return &list->drmBuffers;
-}
-
-drmBOList *driBOGetDRIBuffers(struct _DriBufferList *list)
-{
- return &list->driBuffers;
-}
-
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
- */
-
-#ifndef _PSB_BUFMGR_H_
-#define _PSB_BUFMGR_H_
-#include <xf86mm.h>
-#include "i915_drm.h"
-#include "ws_dri_fencemgr.h"
-
-typedef struct _drmBONode
-{
- drmMMListHead head;
- drmBO *buf;
- struct drm_i915_op_arg bo_arg;
- uint64_t arg0;
- uint64_t arg1;
-} drmBONode;
-
-typedef struct _drmBOList {
- unsigned numTarget;
- unsigned numCurrent;
- unsigned numOnList;
- drmMMListHead list;
- drmMMListHead free;
-} drmBOList;
-
-
-struct _DriFenceObject;
-struct _DriBufferObject;
-struct _DriBufferPool;
-struct _DriBufferList;
-
-/*
- * Return a pointer to the libdrm buffer object this DriBufferObject
- * uses.
- */
-
-extern drmBO *driBOKernel(struct _DriBufferObject *buf);
-extern void *driBOMap(struct _DriBufferObject *buf, unsigned flags,
- unsigned hint);
-extern void driBOUnmap(struct _DriBufferObject *buf);
-extern unsigned long driBOOffset(struct _DriBufferObject *buf);
-extern unsigned long driBOPoolOffset(struct _DriBufferObject *buf);
-
-extern uint64_t driBOFlags(struct _DriBufferObject *buf);
-extern struct _DriBufferObject *driBOReference(struct _DriBufferObject *buf);
-extern void driBOUnReference(struct _DriBufferObject *buf);
-
-extern int driBOData(struct _DriBufferObject *r_buf,
- unsigned size, const void *data,
- struct _DriBufferPool *pool, uint64_t flags);
-
-extern void driBOSubData(struct _DriBufferObject *buf,
- unsigned long offset, unsigned long size,
- const void *data);
-extern void driBOGetSubData(struct _DriBufferObject *buf,
- unsigned long offset, unsigned long size,
- void *data);
-extern int driGenBuffers(struct _DriBufferPool *pool,
- const char *name,
- unsigned n,
- struct _DriBufferObject *buffers[],
- unsigned alignment, uint64_t flags, unsigned hint);
-extern void driGenUserBuffer(struct _DriBufferPool *pool,
- const char *name,
- struct _DriBufferObject *buffers[],
- void *ptr, unsigned bytes);
-extern void driDeleteBuffers(unsigned n, struct _DriBufferObject *buffers[]);
-extern void driInitBufMgr(int fd);
-extern struct _DriBufferList *driBOCreateList(int target);
-extern int driBOResetList(struct _DriBufferList * list);
-extern void driBOAddListItem(struct _DriBufferList * list,
- struct _DriBufferObject *buf,
- uint64_t flags, uint64_t mask, int *itemLoc,
- struct _drmBONode **node);
-
-extern void driBOValidateList(int fd, struct _DriBufferList * list);
-extern void driBOFreeList(struct _DriBufferList * list);
-extern struct _DriFenceObject *driBOFenceUserList(struct _DriFenceMgr *mgr,
- struct _DriBufferList *list,
- const char *name,
- drmFence *kFence);
-extern void driBOUnrefUserList(struct _DriBufferList *list);
-extern void driBOValidateUserList(struct _DriBufferList * list);
-extern drmBOList *driGetdrmBOList(struct _DriBufferList *list);
-extern void driPutdrmBOList(struct _DriBufferList *list);
-
-extern void driBOFence(struct _DriBufferObject *buf,
- struct _DriFenceObject *fence);
-
-extern void driPoolTakeDown(struct _DriBufferPool *pool);
-extern void driBOSetReferenced(struct _DriBufferObject *buf,
- unsigned long handle);
-unsigned long driBOSize(struct _DriBufferObject *buf);
-extern void driBOWaitIdle(struct _DriBufferObject *buf, int lazy);
-
-extern void driReadLockKernelBO(void);
-extern void driReadUnlockKernelBO(void);
-extern void driWriteLockKernelBO(void);
-extern void driWriteUnlockKernelBO(void);
-
-/*
- * For debugging purposes.
- */
-
-extern drmBOList *driBOGetDRMBuffers(struct _DriBufferList *list);
-extern drmBOList *driBOGetDRIBuffers(struct _DriBufferList *list);
-#endif
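
/*
 * A short, illustrative sketch of the buffer side of this interface (hedged;
 * example_upload is a hypothetical helper and "pool" is assumed to come from
 * one of the builtin pool initializers declared in ws_dri_bufpool.h).
 */
#include "ws_dri_bufmgr.h"
#include "ws_dri_bufpool.h"

static void
example_upload(struct _DriBufferPool *pool, const void *data, unsigned size)
{
   struct _DriBufferObject *buf;
   void *virtual;

   /* flags == 0 makes driGenBuffers fall back to its default placement. */
   if (driGenBuffers(pool, "example", 1, &buf, 0, 0, 0) != 0)
      return;

   /* Allocate backing storage from the pool and copy the initial data. */
   driBOData(buf, size, data, pool, 0);

   /* Map, inspect, unmap; the mapping is only valid until driBOUnmap. */
   virtual = driBOMap(buf, DRM_BO_FLAG_READ, 0);
   (void) virtual;
   driBOUnmap(buf);

   driDeleteBuffers(1, &buf);
}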
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#ifndef _PSB_BUFPOOL_H_
-#define _PSB_BUFPOOL_H_
-
-#include <xf86drm.h>
-#include <glthread.h>
-struct _DriFenceObject;
-
-typedef struct _DriBufferPool
-{
- int fd;
- int (*map) (struct _DriBufferPool * pool, void *private,
- unsigned flags, int hint, _glthread_Mutex *mutex,
- void **virtual);
- int (*unmap) (struct _DriBufferPool * pool, void *private);
- int (*destroy) (struct _DriBufferPool * pool, void *private);
- unsigned long (*offset) (struct _DriBufferPool * pool, void *private);
- unsigned long (*poolOffset) (struct _DriBufferPool * pool, void *private);
- uint64_t (*flags) (struct _DriBufferPool * pool, void *private);
- unsigned long (*size) (struct _DriBufferPool * pool, void *private);
- void *(*create) (struct _DriBufferPool * pool, unsigned long size,
- uint64_t flags, unsigned hint, unsigned alignment);
- void *(*reference) (struct _DriBufferPool * pool, unsigned handle);
- int (*unreference) (struct _DriBufferPool * pool, void *private);
- int (*fence) (struct _DriBufferPool * pool, void *private,
- struct _DriFenceObject * fence);
- drmBO *(*kernel) (struct _DriBufferPool * pool, void *private);
- int (*validate) (struct _DriBufferPool * pool, void *private, _glthread_Mutex *mutex);
- int (*waitIdle) (struct _DriBufferPool *pool, void *private, _glthread_Mutex *mutex,
- int lazy);
- int (*setStatus) (struct _DriBufferPool *pool, void *private,
- uint64_t flag_diff, uint64_t old_flags);
- void (*takeDown) (struct _DriBufferPool * pool);
- void *data;
-} DriBufferPool;
-
-extern void bmError(int val, const char *file, const char *function,
- int line);
-#define BM_CKFATAL(val) \
- do{ \
- int tstVal = (val); \
- if (tstVal) \
- bmError(tstVal, __FILE__, __FUNCTION__, __LINE__); \
- } while(0)
-
-
-/*
- * Builtin pools.
- */
-
-/*
- * Kernel buffer objects. Size in multiples of page size. Page size aligned.
- */
-
-extern struct _DriBufferPool *driDRMPoolInit(int fd);
-extern struct _DriBufferPool *driMallocPoolInit(void);
-
-struct _DriFreeSlabManager;
-extern struct _DriBufferPool * driSlabPoolInit(int fd, uint64_t flags,
- uint64_t validMask,
- uint32_t smallestSize,
- uint32_t numSizes,
- uint32_t desiredNumBuffers,
- uint32_t maxSlabSize,
- uint32_t pageAlignment,
- struct _DriFreeSlabManager *fMan);
-extern void driFinishFreeSlabManager(struct _DriFreeSlabManager *fMan);
-extern struct _DriFreeSlabManager *
-driInitFreeSlabManager(uint32_t checkIntervalMsec, uint32_t slabTimeoutMsec);
-
-
-#endif
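
/*
 * A hedged, illustrative sketch of bringing up the builtin pools (the sizes
 * below are placeholders, not recommendations; example_pools is a
 * hypothetical helper).
 */
#include "ws_dri_bufpool.h"
#include "ws_dri_bufmgr.h"

static void
example_pools(int fd)
{
   struct _DriFreeSlabManager *fman;
   struct _DriBufferPool *drm_pool, *malloc_pool, *slab_pool;

   drm_pool = driDRMPoolInit(fd);      /* kernel buffer objects            */
   malloc_pool = driMallocPoolInit();  /* plain system memory, local only  */

   /* Scan for idle slabs every second; free slabs unused for two seconds. */
   fman = driInitFreeSlabManager(1000, 2000);
   slab_pool = driSlabPoolInit(fd,
                               DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
                               DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_NO_MOVE,
                               DRM_BO_MASK_MEM,   /* valid mask            */
                               4096,              /* smallest bucket size  */
                               5,                 /* number of buckets     */
                               32,                /* buffers per slab      */
                               32 * 4096,         /* maximum slab size     */
                               0,                 /* page alignment        */
                               fman);

   /* ... use the pools through the driBO interface in ws_dri_bufmgr.h ... */

   driPoolTakeDown(slab_pool);
   driPoolTakeDown(malloc_pool);
   driPoolTakeDown(drm_pool);
   driFinishFreeSlabManager(fman);
}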
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include <xf86drm.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include "ws_dri_bufpool.h"
-#include "ws_dri_bufmgr.h"
-#include "assert.h"
-
-/*
- * Buffer pool implementation using DRM buffer objects as DRI buffer objects.
- */
-
-static void *
-pool_create(struct _DriBufferPool *pool,
- unsigned long size, uint64_t flags, unsigned hint,
- unsigned alignment)
-{
- drmBO *buf = (drmBO *) malloc(sizeof(*buf));
- int ret;
- unsigned pageSize = getpagesize();
-
- if (!buf)
- return NULL;
-
- if ((alignment > pageSize) && (alignment % pageSize)) {
- free(buf);
- return NULL;
- }
-
- ret = drmBOCreate(pool->fd, size, alignment / pageSize,
- NULL,
- flags, hint, buf);
- if (ret) {
- free(buf);
- return NULL;
- }
-
- return (void *) buf;
-}
-
-static void *
-pool_reference(struct _DriBufferPool *pool, unsigned handle)
-{
- drmBO *buf = (drmBO *) malloc(sizeof(*buf));
- int ret;
-
- if (!buf)
- return NULL;
-
- ret = drmBOReference(pool->fd, handle, buf);
-
- if (ret) {
- free(buf);
- return NULL;
- }
-
- return (void *) buf;
-}
-
-static int
-pool_destroy(struct _DriBufferPool *pool, void *private)
-{
- int ret;
- drmBO *buf = (drmBO *) private;
- driReadLockKernelBO();
- ret = drmBOUnreference(pool->fd, buf);
- free(buf);
- driReadUnlockKernelBO();
- return ret;
-}
-
-static int
-pool_unreference(struct _DriBufferPool *pool, void *private)
-{
- int ret;
- drmBO *buf = (drmBO *) private;
- driReadLockKernelBO();
- ret = drmBOUnreference(pool->fd, buf);
- free(buf);
- driReadUnlockKernelBO();
- return ret;
-}
-
-static int
-pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
- int hint, _glthread_Mutex *mutex, void **virtual)
-{
- drmBO *buf = (drmBO *) private;
- int ret;
-
- driReadLockKernelBO();
- ret = drmBOMap(pool->fd, buf, flags, hint, virtual);
- driReadUnlockKernelBO();
- return ret;
-}
-
-static int
-pool_unmap(struct _DriBufferPool *pool, void *private)
-{
- drmBO *buf = (drmBO *) private;
- int ret;
-
- driReadLockKernelBO();
- ret = drmBOUnmap(pool->fd, buf);
- driReadUnlockKernelBO();
-
- return ret;
-}
-
-static unsigned long
-pool_offset(struct _DriBufferPool *pool, void *private)
-{
- drmBO *buf = (drmBO *) private;
- unsigned long offset;
-
- driReadLockKernelBO();
- assert(buf->flags & DRM_BO_FLAG_NO_MOVE);
- offset = buf->offset;
- driReadUnlockKernelBO();
-
- return offset;
-}
-
-static unsigned long
-pool_poolOffset(struct _DriBufferPool *pool, void *private)
-{
- return 0;
-}
-
-static uint64_t
-pool_flags(struct _DriBufferPool *pool, void *private)
-{
- drmBO *buf = (drmBO *) private;
- uint64_t flags;
-
- driReadLockKernelBO();
- flags = buf->flags;
- driReadUnlockKernelBO();
-
- return flags;
-}
-
-
-static unsigned long
-pool_size(struct _DriBufferPool *pool, void *private)
-{
- drmBO *buf = (drmBO *) private;
- unsigned long size;
-
- driReadLockKernelBO();
- size = buf->size;
- driReadUnlockKernelBO();
-
- return size;
-}
-
-static int
-pool_fence(struct _DriBufferPool *pool, void *private,
- struct _DriFenceObject *fence)
-{
- /*
- * Noop. The kernel handles all fencing.
- */
-
- return 0;
-}
-
-static drmBO *
-pool_kernel(struct _DriBufferPool *pool, void *private)
-{
- return (drmBO *) private;
-}
-
-static int
-pool_waitIdle(struct _DriBufferPool *pool, void *private, _glthread_Mutex *mutex,
- int lazy)
-{
- drmBO *buf = (drmBO *) private;
- int ret;
-
- driReadLockKernelBO();
- ret = drmBOWaitIdle(pool->fd, buf, (lazy) ? DRM_BO_HINT_WAIT_LAZY:0);
- driReadUnlockKernelBO();
-
- return ret;
-}
-
-
-static void
-pool_takedown(struct _DriBufferPool *pool)
-{
- free(pool);
-}
-
-/*static int
-pool_setStatus(struct _DriBufferPool *pool, void *private,
- uint64_t flag_diff, uint64_t old_flags)
-{
- drmBO *buf = (drmBO *) private;
- uint64_t new_flags = old_flags ^ flag_diff;
- int ret;
-
- driReadLockKernelBO();
- ret = drmBOSetStatus(pool->fd, buf, new_flags, flag_diff,
- 0, 0, 0);
- driReadUnlockKernelBO();
- return ret;
-}*/
-
-struct _DriBufferPool *
-driDRMPoolInit(int fd)
-{
- struct _DriBufferPool *pool;
-
- pool = (struct _DriBufferPool *) malloc(sizeof(*pool));
-
- if (!pool)
- return NULL;
-
- pool->fd = fd;
- pool->map = &pool_map;
- pool->unmap = &pool_unmap;
- pool->destroy = &pool_destroy;
- pool->offset = &pool_offset;
- pool->poolOffset = &pool_poolOffset;
- pool->flags = &pool_flags;
- pool->size = &pool_size;
- pool->create = &pool_create;
- pool->fence = &pool_fence;
- pool->kernel = &pool_kernel;
- pool->validate = NULL;
- pool->waitIdle = &pool_waitIdle;
- pool->takeDown = &pool_takedown;
- pool->reference = &pool_reference;
- pool->unreference = &pool_unreference;
- pool->data = NULL;
- return pool;
-}
+++ /dev/null
-#include "ws_dri_fencemgr.h"
-#include "glthread.h"
-#include <xf86mm.h>
-#include <string.h>
-#include <unistd.h>
-
-/*
- * Note: Locking order is
- * _DriFenceObject::mutex
- * _DriFenceMgr::mutex
- */
-
-struct _DriFenceMgr {
- /*
- * Constant members. Need no mutex protection.
- */
- struct _DriFenceMgrCreateInfo info;
- void *private;
-
- /*
- * These members are protected by this->mutex
- */
- _glthread_Mutex mutex;
- int refCount;
- drmMMListHead *heads;
- int num_fences;
-};
-
-struct _DriFenceObject {
-
- /*
- * These members are constant and need no mutex protection.
- */
- struct _DriFenceMgr *mgr;
- uint32_t fence_class;
- uint32_t fence_type;
-
- /*
- * These members are protected by mgr->mutex.
- */
- drmMMListHead head;
- int refCount;
-
- /*
- * These members are protected by this->mutex.
- */
- _glthread_Mutex mutex;
- uint32_t signaled_type;
- void *private;
-};
-
-uint32_t
-driFenceType(struct _DriFenceObject *fence)
-{
- return fence->fence_type;
-}
-
-struct _DriFenceMgr *
-driFenceMgrCreate(const struct _DriFenceMgrCreateInfo *info)
-{
- struct _DriFenceMgr *tmp;
- uint32_t i;
-
- tmp = calloc(1, sizeof(*tmp));
- if (!tmp)
- return NULL;
-
- _glthread_INIT_MUTEX(tmp->mutex);
- _glthread_LOCK_MUTEX(tmp->mutex);
- tmp->refCount = 1;
- tmp->info = *info;
- tmp->num_fences = 0;
- tmp->heads = calloc(tmp->info.num_classes, sizeof(*tmp->heads));
- if (!tmp->heads)
- goto out_err;
-
- for (i=0; i<tmp->info.num_classes; ++i) {
- DRMINITLISTHEAD(&tmp->heads[i]);
- }
- _glthread_UNLOCK_MUTEX(tmp->mutex);
- return tmp;
-
- out_err:
- if (tmp)
- free(tmp);
- return NULL;
-}
-
-static void
-driFenceMgrUnrefUnlock(struct _DriFenceMgr **pMgr)
-{
- struct _DriFenceMgr *mgr = *pMgr;
-
- *pMgr = NULL;
- if (--mgr->refCount == 0)
- free(mgr);
- else
- _glthread_UNLOCK_MUTEX(mgr->mutex);
-}
-
-void
-driFenceMgrUnReference(struct _DriFenceMgr **pMgr)
-{
- _glthread_LOCK_MUTEX((*pMgr)->mutex);
- driFenceMgrUnrefUnlock(pMgr);
-}
-
-static void
-driFenceUnReferenceLocked(struct _DriFenceObject **pFence)
-{
- struct _DriFenceObject *fence = *pFence;
- struct _DriFenceMgr *mgr = fence->mgr;
-
- *pFence = NULL;
- if (--fence->refCount == 0) {
- DRMLISTDELINIT(&fence->head);
- if (fence->private)
- mgr->info.unreference(mgr, &fence->private);
- --mgr->num_fences;
- fence->mgr = NULL;
- --mgr->refCount;
- free(fence);
-
- }
-}
-
-
-static void
-driSignalPreviousFencesLocked(struct _DriFenceMgr *mgr,
- drmMMListHead *list,
- uint32_t fence_class,
- uint32_t fence_type)
-{
- struct _DriFenceObject *entry;
- drmMMListHead *prev;
-
- while(list != &mgr->heads[fence_class]) {
- entry = DRMLISTENTRY(struct _DriFenceObject, list, head);
-
- /*
- * Up refcount so that entry doesn't disappear from under us
- * when we unlock-relock mgr to get the correct locking order.
- */
-
- ++entry->refCount;
- _glthread_UNLOCK_MUTEX(mgr->mutex);
- _glthread_LOCK_MUTEX(entry->mutex);
- _glthread_LOCK_MUTEX(mgr->mutex);
-
- prev = list->prev;
-
-
-
- if (list->prev == list) {
-
- /*
- * Somebody else removed the entry from the list.
- */
-
- _glthread_UNLOCK_MUTEX(entry->mutex);
- driFenceUnReferenceLocked(&entry);
- return;
- }
-
- entry->signaled_type |= (fence_type & entry->fence_type);
- if (entry->signaled_type == entry->fence_type) {
- DRMLISTDELINIT(list);
- mgr->info.unreference(mgr, &entry->private);
- }
- _glthread_UNLOCK_MUTEX(entry->mutex);
- driFenceUnReferenceLocked(&entry);
- list = prev;
- }
-}
-
-
-int
-driFenceFinish(struct _DriFenceObject *fence, uint32_t fence_type,
- int lazy_hint)
-{
- struct _DriFenceMgr *mgr = fence->mgr;
- int ret = 0;
-
- _glthread_LOCK_MUTEX(fence->mutex);
-
- if ((fence->signaled_type & fence_type) == fence_type)
- goto out0;
-
- ret = mgr->info.finish(mgr, fence->private, fence_type, lazy_hint);
- if (ret)
- goto out0;
-
- _glthread_LOCK_MUTEX(mgr->mutex);
- _glthread_UNLOCK_MUTEX(fence->mutex);
-
- driSignalPreviousFencesLocked(mgr, &fence->head, fence->fence_class,
- fence_type);
- _glthread_UNLOCK_MUTEX(mgr->mutex);
- return 0;
-
- out0:
- _glthread_UNLOCK_MUTEX(fence->mutex);
- return ret;
-}
-
-uint32_t driFenceSignaledTypeCached(struct _DriFenceObject *fence)
-{
- uint32_t ret;
-
- _glthread_LOCK_MUTEX(fence->mutex);
- ret = fence->signaled_type;
- _glthread_UNLOCK_MUTEX(fence->mutex);
-
- return ret;
-}
-
-int
-driFenceSignaledType(struct _DriFenceObject *fence, uint32_t flush_type,
- uint32_t *signaled)
-{
- int ret = 0;
- struct _DriFenceMgr *mgr;
-
- _glthread_LOCK_MUTEX(fence->mutex);
- mgr = fence->mgr;
- *signaled = fence->signaled_type;
- if ((fence->signaled_type & flush_type) == flush_type)
- goto out0;
-
- ret = mgr->info.signaled(mgr, fence->private, flush_type, signaled);
- if (ret) {
- *signaled = fence->signaled_type;
- goto out0;
- }
-
- if ((fence->signaled_type | *signaled) == fence->signaled_type)
- goto out0;
-
- _glthread_LOCK_MUTEX(mgr->mutex);
- _glthread_UNLOCK_MUTEX(fence->mutex);
-
- driSignalPreviousFencesLocked(mgr, &fence->head, fence->fence_class,
- *signaled);
-
- _glthread_UNLOCK_MUTEX(mgr->mutex);
- return 0;
- out0:
- _glthread_UNLOCK_MUTEX(fence->mutex);
- return ret;
-}
-
-struct _DriFenceObject *
-driFenceReference(struct _DriFenceObject *fence)
-{
- _glthread_LOCK_MUTEX(fence->mgr->mutex);
- ++fence->refCount;
- _glthread_UNLOCK_MUTEX(fence->mgr->mutex);
- return fence;
-}
-
-void
-driFenceUnReference(struct _DriFenceObject **pFence)
-{
- struct _DriFenceMgr *mgr;
-
- if (*pFence == NULL)
- return;
-
- mgr = (*pFence)->mgr;
- _glthread_LOCK_MUTEX(mgr->mutex);
- ++mgr->refCount;
- driFenceUnReferenceLocked(pFence);
- driFenceMgrUnrefUnlock(&mgr);
-}
-
-struct _DriFenceObject
-*driFenceCreate(struct _DriFenceMgr *mgr, uint32_t fence_class,
- uint32_t fence_type, void *private, size_t private_size)
-{
- struct _DriFenceObject *fence;
- size_t fence_size = sizeof(*fence);
-
- if (private_size)
- fence_size = ((fence_size + 15) & ~15);
-
- fence = calloc(1, fence_size + private_size);
-
- if (!fence) {
- int ret = mgr->info.finish(mgr, private, fence_type, 0);
-
- if (ret)
- usleep(10000000);
-
- return NULL;
- }
-
- _glthread_INIT_MUTEX(fence->mutex);
- _glthread_LOCK_MUTEX(fence->mutex);
- _glthread_LOCK_MUTEX(mgr->mutex);
- fence->refCount = 1;
- DRMLISTADDTAIL(&fence->head, &mgr->heads[fence_class]);
- fence->mgr = mgr;
- ++mgr->refCount;
- ++mgr->num_fences;
- _glthread_UNLOCK_MUTEX(mgr->mutex);
- fence->fence_class = fence_class;
- fence->fence_type = fence_type;
- fence->signaled_type = 0;
- fence->private = private;
- if (private_size) {
- fence->private = (void *)(((uint8_t *) fence) + fence_size);
- memcpy(fence->private, private, private_size);
- }
-
- _glthread_UNLOCK_MUTEX(fence->mutex);
- return fence;
-}
-
-
-static int
-tSignaled(struct _DriFenceMgr *mgr, void *private, uint32_t flush_type,
- uint32_t *signaled_type)
-{
- long fd = (long) mgr->private;
- int dummy;
- drmFence *fence = (drmFence *) private;
- int ret;
-
- *signaled_type = 0;
- ret = drmFenceSignaled((int) fd, fence, flush_type, &dummy);
- if (ret)
- return ret;
-
- *signaled_type = fence->signaled;
-
- return 0;
-}
-
-static int
-tFinish(struct _DriFenceMgr *mgr, void *private, uint32_t fence_type,
- int lazy_hint)
-{
- long fd = (long) mgr->private;
- unsigned flags = lazy_hint ? DRM_FENCE_FLAG_WAIT_LAZY : 0;
-
- return drmFenceWait((int)fd, flags, (drmFence *) private, fence_type);
-}
-
-static int
-tUnref(struct _DriFenceMgr *mgr, void **private)
-{
- long fd = (long) mgr->private;
- drmFence *fence = (drmFence *) *private;
- *private = NULL;
-
- return drmFenceUnreference(fd, fence);
-}
-
-struct _DriFenceMgr *driFenceMgrTTMInit(int fd)
-{
- struct _DriFenceMgrCreateInfo info;
- struct _DriFenceMgr *mgr;
-
- info.flags = DRI_FENCE_CLASS_ORDERED;
- info.num_classes = 4;
- info.signaled = tSignaled;
- info.finish = tFinish;
- info.unreference = tUnref;
-
- mgr = driFenceMgrCreate(&info);
- if (mgr == NULL)
- return NULL;
-
- mgr->private = (void *) (long) fd;
- return mgr;
-}
-
+++ /dev/null
-#ifndef DRI_FENCEMGR_H
-#define DRI_FENCEMGR_H
-
-#include <stdint.h>
-#include <stdlib.h>
-
-struct _DriFenceObject;
-struct _DriFenceMgr;
-
-/*
- * Do a quick check to see if the fence manager has registered the fence
- * object as signaled. Note that this function may return a false negative
- * answer.
- */
-extern uint32_t driFenceSignaledTypeCached(struct _DriFenceObject *fence);
-
-/*
- * Check if the fence object is signaled. This function can be substantially
- * more expensive to call than the above function, but will not return a false
- * negative answer. The argument "flush_type" sets the types that the
- * underlying mechanism must make sure will eventually signal.
- */
-extern int driFenceSignaledType(struct _DriFenceObject *fence,
- uint32_t flush_type, uint32_t *signaled);
-
-/*
- * Convenience functions.
- */
-
-static inline int driFenceSignaled(struct _DriFenceObject *fence,
- uint32_t flush_type)
-{
- uint32_t signaled_types;
- int ret = driFenceSignaledType(fence, flush_type, &signaled_types);
- if (ret)
- return 0;
- return ((signaled_types & flush_type) == flush_type);
-}
-
-static inline int driFenceSignaledCached(struct _DriFenceObject *fence,
- uint32_t flush_type)
-{
- uint32_t signaled_types =
- driFenceSignaledTypeCached(fence);
-
- return ((signaled_types & flush_type) == flush_type);
-}
-
-/*
- * Reference a fence object.
- */
-extern struct _DriFenceObject *driFenceReference(struct _DriFenceObject *fence);
-
-/*
- * Unreference a fence object. The fence object pointer will be reset to NULL.
- */
-
-extern void driFenceUnReference(struct _DriFenceObject **pFence);
-
-
-/*
- * Wait for a fence to signal the indicated fence_type.
- * If "lazy_hint" is true, it indicates that the wait may sleep to avoid
- * busy-wait polling.
- */
-extern int driFenceFinish(struct _DriFenceObject *fence, uint32_t fence_type,
- int lazy_hint);
-
-/*
- * Create a DriFenceObject for manager "mgr".
- *
- * "private" is a pointer that should be used for the callbacks in
- * struct _DriFenceMgrCreateInfo.
- *
- * If private_size is nonzero, the data stored at *private (private_size
- * bytes) is copied, and the fence manager will instead use a pointer to
- * the copied data for the callbacks in struct _DriFenceMgrCreateInfo.
- * In that case, the object pointed to by "private" may be destroyed after
- * the call to driFenceCreate.
- */
-extern struct _DriFenceObject *driFenceCreate(struct _DriFenceMgr *mgr,
- uint32_t fence_class,
- uint32_t fence_type,
- void *private,
- size_t private_size);
-
-extern uint32_t driFenceType(struct _DriFenceObject *fence);
-
-/*
- * Fence creations are ordered. If a fence signals a fence_type,
- * it is safe to assume that all fences of the same class that were
- * created before that fence have signaled the same type.
- */
-
-#define DRI_FENCE_CLASS_ORDERED (1 << 0)
-
-struct _DriFenceMgrCreateInfo {
- uint32_t flags;
- uint32_t num_classes;
- int (*signaled) (struct _DriFenceMgr *mgr, void *private, uint32_t flush_type,
- uint32_t *signaled_type);
- int (*finish) (struct _DriFenceMgr *mgr, void *private, uint32_t fence_type, int lazy_hint);
- int (*unreference) (struct _DriFenceMgr *mgr, void **private);
-};
-
-extern struct _DriFenceMgr *
-driFenceMgrCreate(const struct _DriFenceMgrCreateInfo *info);
-
-void
-driFenceMgrUnReference(struct _DriFenceMgr **pMgr);
-
-extern struct _DriFenceMgr *
-driFenceMgrTTMInit(int fd);
-
-#endif
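
/*
 * A hedged, illustrative sketch of the fence manager interface
 * (example_fence is a hypothetical helper, and kernel_fence is assumed to
 * be an already-emitted kernel drmFence; a real driver takes the fence type
 * from the validated buffers).
 */
#include "ws_dri_fencemgr.h"
#include <xf86mm.h>

static void
example_fence(int fd, drmFence *kernel_fence)
{
   struct _DriFenceMgr *mgr = driFenceMgrTTMInit(fd);
   struct _DriFenceObject *fence;

   /* Wrap the kernel fence; a nonzero private_size means the drmFence
    * struct is copied, so kernel_fence itself may be discarded afterwards. */
   fence = driFenceCreate(mgr, kernel_fence->fence_class, kernel_fence->type,
                          kernel_fence, sizeof(*kernel_fence));

   /* Cheap cached check first, then a blocking (lazy) wait if needed. */
   if (!driFenceSignaledCached(fence, driFenceType(fence)))
      (void) driFenceFinish(fence, driFenceType(fence), 1);

   driFenceUnReference(&fence);
   driFenceMgrUnReference(&mgr);
}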
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include <xf86drm.h>
-#include <stdlib.h>
-#include <errno.h>
-#include "imports.h"
-#include "glthread.h"
-#include "ws_dri_bufpool.h"
-#include "ws_dri_bufmgr.h"
-#include "intel_screen.h"
-
-static void *
-pool_create(struct _DriBufferPool *pool,
- unsigned long size, uint64_t flags, unsigned hint,
- unsigned alignment)
-{
- unsigned long *private = malloc(size + 2*sizeof(unsigned long));
-
- if ((flags & DRM_BO_MASK_MEM) != DRM_BO_FLAG_MEM_LOCAL)
- abort();
-
- if (!private)
- return NULL;
-
- *private = size;
- return (void *)private;
-}
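
/*
 * Layout note: the allocation above reserves two leading unsigned longs in
 * front of the user data. The first holds the buffer size (read back by
 * pool_size below), and pool_map returns the address just past them,
 * i.e. private + 2, as the user-visible pointer.
 */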
-
-
-static int
-pool_destroy(struct _DriBufferPool *pool, void *private)
-{
- free(private);
- return 0;
-}
-
-static int
-pool_waitIdle(struct _DriBufferPool *pool, void *private,
- _glthread_Mutex *mutex, int lazy)
-{
- return 0;
-}
-
-static int
-pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
- int hint, _glthread_Mutex *mutex, void **virtual)
-{
- *virtual = (void *)((unsigned long *)private + 2);
- return 0;
-}
-
-static int
-pool_unmap(struct _DriBufferPool *pool, void *private)
-{
- return 0;
-}
-
-static unsigned long
-pool_offset(struct _DriBufferPool *pool, void *private)
-{
- /*
- * BUG
- */
- abort();
- return 0UL;
-}
-
-static unsigned long
-pool_poolOffset(struct _DriBufferPool *pool, void *private)
-{
- /*
- * BUG
- */
- abort();
- return 0UL;
-}
-
-static uint64_t
-pool_flags(struct _DriBufferPool *pool, void *private)
-{
- return DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
-}
-
-static unsigned long
-pool_size(struct _DriBufferPool *pool, void *private)
-{
- return *(unsigned long *) private;
-}
-
-
-static int
-pool_fence(struct _DriBufferPool *pool, void *private,
- struct _DriFenceObject *fence)
-{
- abort();
- return 0UL;
-}
-
-static drmBO *
-pool_kernel(struct _DriBufferPool *pool, void *private)
-{
- abort();
- return NULL;
-}
-
-static void
-pool_takedown(struct _DriBufferPool *pool)
-{
- free(pool);
-}
-
-
-struct _DriBufferPool *
-driMallocPoolInit(void)
-{
- struct _DriBufferPool *pool;
-
- pool = (struct _DriBufferPool *) malloc(sizeof(*pool));
- if (!pool)
- return NULL;
-
- pool->data = NULL;
- pool->fd = -1;
- pool->map = &pool_map;
- pool->unmap = &pool_unmap;
- pool->destroy = &pool_destroy;
- pool->offset = &pool_offset;
- pool->poolOffset = &pool_poolOffset;
- pool->flags = &pool_flags;
- pool->size = &pool_size;
- pool->create = &pool_create;
- pool->fence = &pool_fence;
- pool->kernel = &pool_kernel;
- pool->validate = NULL;
- pool->waitIdle = &pool_waitIdle;
- pool->takeDown = &pool_takedown;
- return pool;
-}
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
- */
-
-#include <stdint.h>
-#include <sys/time.h>
-#include <errno.h>
-#include <unistd.h>
-#include <assert.h>
-#include "ws_dri_bufpool.h"
-#include "ws_dri_fencemgr.h"
-#include "ws_dri_bufmgr.h"
-#include "glthread.h"
-
-#define DRI_SLABPOOL_ALLOC_RETRIES 100
-
-struct _DriSlab;
-
-struct _DriSlabBuffer {
- int isSlabBuffer;
- drmBO *bo;
- struct _DriFenceObject *fence;
- struct _DriSlab *parent;
- drmMMListHead head;
- uint32_t mapCount;
- uint32_t start;
- uint32_t fenceType;
- int unFenced;
- _glthread_Cond event;
-};
-
-struct _DriKernelBO {
- int fd;
- drmBO bo;
- drmMMListHead timeoutHead;
- drmMMListHead head;
- struct timeval timeFreed;
- uint32_t pageAlignment;
- void *virtual;
-};
-
-struct _DriSlab{
- drmMMListHead head;
- drmMMListHead freeBuffers;
- uint32_t numBuffers;
- uint32_t numFree;
- struct _DriSlabBuffer *buffers;
- struct _DriSlabSizeHeader *header;
- struct _DriKernelBO *kbo;
-};
-
-
-struct _DriSlabSizeHeader {
- drmMMListHead slabs;
- drmMMListHead freeSlabs;
- drmMMListHead delayedBuffers;
- uint32_t numDelayed;
- struct _DriSlabPool *slabPool;
- uint32_t bufSize;
- _glthread_Mutex mutex;
-};
-
-struct _DriFreeSlabManager {
- struct timeval slabTimeout;
- struct timeval checkInterval;
- struct timeval nextCheck;
- drmMMListHead timeoutList;
- drmMMListHead unCached;
- drmMMListHead cached;
- _glthread_Mutex mutex;
-};
-
-
-struct _DriSlabPool {
-
- /*
- * The data of this structure remains constant after
- * initialization and thus needs no mutex protection.
- */
-
- struct _DriFreeSlabManager *fMan;
- uint64_t proposedFlags;
- uint64_t validMask;
- uint32_t *bucketSizes;
- uint32_t numBuckets;
- uint32_t pageSize;
- int fd;
- int pageAlignment;
- int maxSlabSize;
- int desiredNumBuffers;
- struct _DriSlabSizeHeader *headers;
-};
-
-/*
- * FIXME: Perhaps arrange timeout slabs in size buckets for fast
- * retrieval?
- */
-
-
-static inline int
-driTimeAfterEq(struct timeval *arg1, struct timeval *arg2)
-{
- return ((arg1->tv_sec > arg2->tv_sec) ||
- ((arg1->tv_sec == arg2->tv_sec) &&
- (arg1->tv_usec > arg2->tv_usec)));
-}
-
-static inline void
-driTimeAdd(struct timeval *arg, struct timeval *add)
-{
- unsigned int sec;
-
- arg->tv_sec += add->tv_sec;
- arg->tv_usec += add->tv_usec;
- sec = arg->tv_usec / 1000000;
- arg->tv_sec += sec;
- arg->tv_usec -= sec*1000000;
-}
-
-static void
-driFreeKernelBO(struct _DriKernelBO *kbo)
-{
- if (!kbo)
- return;
-
- (void) drmBOUnreference(kbo->fd, &kbo->bo);
- free(kbo);
-}
-
-
-static void
-driFreeTimeoutKBOsLocked(struct _DriFreeSlabManager *fMan,
- struct timeval *time)
-{
- drmMMListHead *list, *next;
- struct _DriKernelBO *kbo;
-
- if (!driTimeAfterEq(time, &fMan->nextCheck))
- return;
-
- for (list = fMan->timeoutList.next, next = list->next;
- list != &fMan->timeoutList;
- list = next, next = list->next) {
-
- kbo = DRMLISTENTRY(struct _DriKernelBO, list, timeoutHead);
-
- if (!driTimeAfterEq(time, &kbo->timeFreed))
- break;
-
- DRMLISTDELINIT(&kbo->timeoutHead);
- DRMLISTDELINIT(&kbo->head);
- driFreeKernelBO(kbo);
- }
-
- fMan->nextCheck = *time;
- driTimeAdd(&fMan->nextCheck, &fMan->checkInterval);
-}
-
-
-/*
- * Add a _DriKernelBO to the free slab manager.
- * This means that it is available for reuse, but if it's not
- * reused in a while, it will be freed.
- */
-
-static void
-driSetKernelBOFree(struct _DriFreeSlabManager *fMan,
- struct _DriKernelBO *kbo)
-{
- struct timeval time;
-
- _glthread_LOCK_MUTEX(fMan->mutex);
- gettimeofday(&time, NULL);
- driTimeAdd(&time, &fMan->slabTimeout);
-
- kbo->timeFreed = time;
-
- if (kbo->bo.flags & DRM_BO_FLAG_CACHED)
- DRMLISTADD(&kbo->head, &fMan->cached);
- else
- DRMLISTADD(&kbo->head, &fMan->unCached);
-
- DRMLISTADDTAIL(&kbo->timeoutHead, &fMan->timeoutList);
- driFreeTimeoutKBOsLocked(fMan, &time);
-
- _glthread_UNLOCK_MUTEX(fMan->mutex);
-}
-
-/*
- * Get a _DriKernelBO for us to use as storage for a slab.
- *
- */
-
-static struct _DriKernelBO *
-driAllocKernelBO(struct _DriSlabSizeHeader *header)
-
-{
- struct _DriSlabPool *slabPool = header->slabPool;
- struct _DriFreeSlabManager *fMan = slabPool->fMan;
- drmMMListHead *list, *next, *head;
- uint32_t size = header->bufSize * slabPool->desiredNumBuffers;
- struct _DriKernelBO *kbo;
- struct _DriKernelBO *kboTmp;
- int ret;
-
- /*
- * FIXME: We should perhaps allow some variation in slab size in order
- * to efficiently reuse slabs.
- */
-
- size = (size <= slabPool->maxSlabSize) ? size : slabPool->maxSlabSize;
- size = (size + slabPool->pageSize - 1) & ~(slabPool->pageSize - 1);
- _glthread_LOCK_MUTEX(fMan->mutex);
-
- kbo = NULL;
-
- retry:
- head = (slabPool->proposedFlags & DRM_BO_FLAG_CACHED) ?
- &fMan->cached : &fMan->unCached;
-
- for (list = head->next, next = list->next;
- list != head;
- list = next, next = list->next) {
-
- kboTmp = DRMLISTENTRY(struct _DriKernelBO, list, head);
-
- if ((kboTmp->bo.size == size) &&
- (slabPool->pageAlignment == 0 ||
- (kboTmp->pageAlignment % slabPool->pageAlignment) == 0)) {
-
- if (!kbo)
- kbo = kboTmp;
-
- if ((kbo->bo.proposedFlags ^ slabPool->proposedFlags) == 0)
- break;
-
- }
- }
-
- if (kbo) {
- DRMLISTDELINIT(&kbo->head);
- DRMLISTDELINIT(&kbo->timeoutHead);
- }
-
- _glthread_UNLOCK_MUTEX(fMan->mutex);
-
- if (kbo) {
- uint64_t new_mask = kbo->bo.proposedFlags ^ slabPool->proposedFlags;
-
- ret = 0;
- if (new_mask) {
- ret = drmBOSetStatus(kbo->fd, &kbo->bo, slabPool->proposedFlags,
- new_mask, DRM_BO_HINT_DONT_FENCE, 0, 0);
- }
- if (ret == 0)
- return kbo;
-
- driFreeKernelBO(kbo);
- kbo = NULL;
- goto retry;
- }
-
- kbo = calloc(1, sizeof(struct _DriKernelBO));
- if (!kbo)
- return NULL;
-
- kbo->fd = slabPool->fd;
- DRMINITLISTHEAD(&kbo->head);
- DRMINITLISTHEAD(&kbo->timeoutHead);
- ret = drmBOCreate(kbo->fd, size, slabPool->pageAlignment, NULL,
- slabPool->proposedFlags,
- DRM_BO_HINT_DONT_FENCE, &kbo->bo);
- if (ret)
- goto out_err0;
-
- ret = drmBOMap(kbo->fd, &kbo->bo,
- DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
- 0, &kbo->virtual);
-
- if (ret)
- goto out_err1;
-
- ret = drmBOUnmap(kbo->fd, &kbo->bo);
- if (ret)
- goto out_err1;
-
- return kbo;
-
- out_err1:
- drmBOUnreference(kbo->fd, &kbo->bo);
- out_err0:
- free(kbo);
- return NULL;
-}
-
-
-static int
-driAllocSlab(struct _DriSlabSizeHeader *header)
-{
- struct _DriSlab *slab;
- struct _DriSlabBuffer *buf;
- uint32_t numBuffers;
- int ret;
- int i;
-
- slab = calloc(1, sizeof(*slab));
- if (!slab)
- return -ENOMEM;
-
- slab->kbo = driAllocKernelBO(header);
- if (!slab->kbo) {
- ret = -ENOMEM;
- goto out_err0;
- }
-
- numBuffers = slab->kbo->bo.size / header->bufSize;
-
- slab->buffers = calloc(numBuffers, sizeof(*slab->buffers));
- if (!slab->buffers) {
- ret = -ENOMEM;
- goto out_err1;
- }
-
- DRMINITLISTHEAD(&slab->head);
- DRMINITLISTHEAD(&slab->freeBuffers);
- slab->numBuffers = numBuffers;
- slab->numFree = 0;
- slab->header = header;
-
- buf = slab->buffers;
- for (i=0; i < numBuffers; ++i) {
- buf->parent = slab;
- buf->start = i* header->bufSize;
- buf->mapCount = 0;
- buf->isSlabBuffer = 1;
- _glthread_INIT_COND(buf->event);
- DRMLISTADDTAIL(&buf->head, &slab->freeBuffers);
- slab->numFree++;
- buf++;
- }
-
- DRMLISTADDTAIL(&slab->head, &header->slabs);
-
- return 0;
-
- out_err1:
- driSetKernelBOFree(header->slabPool->fMan, slab->kbo);
- free(slab->buffers);
- out_err0:
- free(slab);
- return ret;
-}
-
-/*
- * Delete a buffer from the slab header delayed list and put
- * it on the slab free list.
- */
-
-static void
-driSlabFreeBufferLocked(struct _DriSlabBuffer *buf)
-{
- struct _DriSlab *slab = buf->parent;
- struct _DriSlabSizeHeader *header = slab->header;
- drmMMListHead *list = &buf->head;
-
- DRMLISTDEL(list);
- DRMLISTADDTAIL(list, &slab->freeBuffers);
- slab->numFree++;
-
- if (slab->head.next == &slab->head)
- DRMLISTADDTAIL(&slab->head, &header->slabs);
-
- if (slab->numFree == slab->numBuffers) {
- list = &slab->head;
- DRMLISTDEL(list);
- DRMLISTADDTAIL(list, &header->freeSlabs);
- }
-
- if (header->slabs.next == &header->slabs ||
- slab->numFree != slab->numBuffers) {
-
- drmMMListHead *next;
- struct _DriFreeSlabManager *fMan = header->slabPool->fMan;
-
- for (list = header->freeSlabs.next, next = list->next;
- list != &header->freeSlabs;
- list = next, next = list->next) {
-
- slab = DRMLISTENTRY(struct _DriSlab, list, head);
-
- DRMLISTDELINIT(list);
- driSetKernelBOFree(fMan, slab->kbo);
- free(slab->buffers);
- free(slab);
- }
- }
-}
-
-static void
-driSlabCheckFreeLocked(struct _DriSlabSizeHeader *header, int wait)
-{
- drmMMListHead *list, *prev, *first;
- struct _DriSlabBuffer *buf;
- struct _DriSlab *slab;
- int firstWasSignaled = 1;
- int signaled;
- int i;
- int ret;
-
- /*
- * Rerun the freeing test if the youngest tested buffer
- * was signaled, since there might be more idle buffers
- * in the delay list.
- */
-
- while (firstWasSignaled) {
- firstWasSignaled = 0;
- signaled = 0;
- first = header->delayedBuffers.next;
-
- /* Only examine the oldest 1/3 of delayed buffers:
- */
- if (header->numDelayed > 3) {
- for (i = 0; i < header->numDelayed; i += 3) {
- first = first->next;
- }
- }
-
- for (list = first, prev = list->prev;
- list != &header->delayedBuffers;
- list = prev, prev = list->prev) {
- buf = DRMLISTENTRY(struct _DriSlabBuffer, list, head);
- slab = buf->parent;
-
- if (!signaled) {
- if (wait) {
- ret = driFenceFinish(buf->fence, buf->fenceType, 0);
- if (ret)
- break;
- signaled = 1;
- wait = 0;
- } else {
- signaled = driFenceSignaled(buf->fence, buf->fenceType);
- }
- if (signaled) {
- if (list == first)
- firstWasSignaled = 1;
- driFenceUnReference(&buf->fence);
- header->numDelayed--;
- driSlabFreeBufferLocked(buf);
- }
- } else if (driFenceSignaledCached(buf->fence, buf->fenceType)) {
- driFenceUnReference(&buf->fence);
- header->numDelayed--;
- driSlabFreeBufferLocked(buf);
- }
- }
- }
-}
-
-
-static struct _DriSlabBuffer *
-driSlabAllocBuffer(struct _DriSlabSizeHeader *header)
-{
- struct _DriSlabBuffer *buf;
- struct _DriSlab *slab;
- drmMMListHead *list;
- int count = DRI_SLABPOOL_ALLOC_RETRIES;
-
- _glthread_LOCK_MUTEX(header->mutex);
- while(header->slabs.next == &header->slabs && count > 0) {
- driSlabCheckFreeLocked(header, 0);
- if (header->slabs.next != &header->slabs)
- break;
-
- _glthread_UNLOCK_MUTEX(header->mutex);
- if (count != DRI_SLABPOOL_ALLOC_RETRIES)
- usleep(1);
- _glthread_LOCK_MUTEX(header->mutex);
- (void) driAllocSlab(header);
- count--;
- }
-
- list = header->slabs.next;
- if (list == &header->slabs) {
- _glthread_UNLOCK_MUTEX(header->mutex);
- return NULL;
- }
- slab = DRMLISTENTRY(struct _DriSlab, list, head);
- if (--slab->numFree == 0)
- DRMLISTDELINIT(list);
-
- list = slab->freeBuffers.next;
- DRMLISTDELINIT(list);
-
- _glthread_UNLOCK_MUTEX(header->mutex);
- buf = DRMLISTENTRY(struct _DriSlabBuffer, list, head);
- return buf;
-}
-
-static void *
-pool_create(struct _DriBufferPool *driPool, unsigned long size,
- uint64_t flags, unsigned hint, unsigned alignment)
-{
- struct _DriSlabPool *pool = (struct _DriSlabPool *) driPool->data;
- struct _DriSlabSizeHeader *header;
- struct _DriSlabBuffer *buf;
- void *dummy;
- int i;
- int ret;
-
- /*
- * FIXME: Check for compatibility.
- */
-
- header = pool->headers;
- for (i=0; i<pool->numBuckets; ++i) {
- if (header->bufSize >= size)
- break;
- header++;
- }
-
- if (i < pool->numBuckets)
- return driSlabAllocBuffer(header);
-
-
- /*
- * Fall back to allocating a buffer object directly from DRM
- * and wrapping it in a driBO structure.
- */
-
-
- buf = calloc(1, sizeof(*buf));
-
- if (!buf)
- return NULL;
-
- buf->bo = calloc(1, sizeof(*buf->bo));
- if (!buf->bo)
- goto out_err0;
-
- if (alignment) {
- if ((alignment < pool->pageSize) && (pool->pageSize % alignment))
- goto out_err1;
- if ((alignment > pool->pageSize) && (alignment % pool->pageSize))
- goto out_err1;
- }
-
- ret = drmBOCreate(pool->fd, size, alignment / pool->pageSize, NULL,
- flags, hint, buf->bo);
- if (ret)
- goto out_err1;
-
- ret = drmBOMap(pool->fd, buf->bo, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
- 0, &dummy);
- if (ret)
- goto out_err2;
-
- ret = drmBOUnmap(pool->fd, buf->bo);
- if (ret)
- goto out_err2;
-
- return buf;
- out_err2:
- drmBOUnreference(pool->fd, buf->bo);
- out_err1:
- free(buf->bo);
- out_err0:
- free(buf);
- return NULL;
-}
-
-static int
-pool_destroy(struct _DriBufferPool *driPool, void *private)
-{
- struct _DriSlabBuffer *buf =
- (struct _DriSlabBuffer *) private;
- struct _DriSlab *slab;
- struct _DriSlabSizeHeader *header;
-
- if (!buf->isSlabBuffer) {
- struct _DriSlabPool *pool = (struct _DriSlabPool *) driPool->data;
- int ret;
-
- ret = drmBOUnreference(pool->fd, buf->bo);
- free(buf->bo);
- free(buf);
- return ret;
- }
-
- slab = buf->parent;
- header = slab->header;
-
- _glthread_LOCK_MUTEX(header->mutex);
- buf->unFenced = 0;
- buf->mapCount = 0;
-
- if (buf->fence && !driFenceSignaledCached(buf->fence, buf->fenceType)) {
- DRMLISTADDTAIL(&buf->head, &header->delayedBuffers);
- header->numDelayed++;
- } else {
- if (buf->fence)
- driFenceUnReference(&buf->fence);
- driSlabFreeBufferLocked(buf);
- }
-
- _glthread_UNLOCK_MUTEX(header->mutex);
- return 0;
-}
-
-static int
-pool_waitIdle(struct _DriBufferPool *driPool, void *private,
- _glthread_Mutex *mutex, int lazy)
-{
- struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
-
- while(buf->unFenced)
- _glthread_COND_WAIT(buf->event, *mutex);
-
- if (!buf->fence)
- return 0;
-
- driFenceFinish(buf->fence, buf->fenceType, lazy);
- driFenceUnReference(&buf->fence);
-
- return 0;
-}
-
-static int
-pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
- int hint, _glthread_Mutex *mutex, void **virtual)
-{
- struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
- int busy;
-
- if (buf->isSlabBuffer)
- busy = buf->unFenced || (buf->fence && !driFenceSignaledCached(buf->fence, buf->fenceType));
- else
- busy = buf->fence && !driFenceSignaled(buf->fence, buf->fenceType);
-
-
- if (busy) {
- if (hint & DRM_BO_HINT_DONT_BLOCK)
- return -EBUSY;
- else {
- (void) pool_waitIdle(pool, private, mutex, 0);
- }
- }
-
- ++buf->mapCount;
- *virtual = (buf->isSlabBuffer) ?
- (void *) ((uint8_t *) buf->parent->kbo->virtual + buf->start) :
- (void *) buf->bo->virtual;
-
- return 0;
-}
-
-static int
-pool_unmap(struct _DriBufferPool *pool, void *private)
-{
- struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
-
- --buf->mapCount;
- if (buf->mapCount == 0 && buf->isSlabBuffer)
- _glthread_COND_BROADCAST(buf->event);
-
- return 0;
-}
-
-static unsigned long
-pool_offset(struct _DriBufferPool *pool, void *private)
-{
- struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
- struct _DriSlab *slab;
- struct _DriSlabSizeHeader *header;
-
- if (!buf->isSlabBuffer) {
- assert(buf->bo->proposedFlags & DRM_BO_FLAG_NO_MOVE);
- return buf->bo->offset;
- }
-
- slab = buf->parent;
- header = slab->header;
-
- (void) header;
- assert(header->slabPool->proposedFlags & DRM_BO_FLAG_NO_MOVE);
- return slab->kbo->bo.offset + buf->start;
-}
-
-static unsigned long
-pool_poolOffset(struct _DriBufferPool *pool, void *private)
-{
- struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
-
- return buf->start;
-}
-
-static uint64_t
-pool_flags(struct _DriBufferPool *pool, void *private)
-{
- struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
-
- if (!buf->isSlabBuffer)
- return buf->bo->flags;
-
- return buf->parent->kbo->bo.flags;
-}
-
-static unsigned long
-pool_size(struct _DriBufferPool *pool, void *private)
-{
- struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
- if (!buf->isSlabBuffer)
- return buf->bo->size;
-
- return buf->parent->header->bufSize;
-}
-
-static int
-pool_fence(struct _DriBufferPool *pool, void *private,
- struct _DriFenceObject *fence)
-{
- struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
- drmBO *bo;
-
- if (buf->fence)
- driFenceUnReference(&buf->fence);
-
- buf->fence = driFenceReference(fence);
- bo = (buf->isSlabBuffer) ?
- &buf->parent->kbo->bo:
- buf->bo;
- buf->fenceType = bo->fenceFlags;
-
- buf->unFenced = 0;
- _glthread_COND_BROADCAST(buf->event);
-
- return 0;
-}
-
-static drmBO *
-pool_kernel(struct _DriBufferPool *pool, void *private)
-{
- struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
-
- return (buf->isSlabBuffer) ? &buf->parent->kbo->bo : buf->bo;
-}
-
-static int
-pool_validate(struct _DriBufferPool *pool, void *private,
- _glthread_Mutex *mutex)
-{
- struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
-
- if (!buf->isSlabBuffer)
- return 0;
-
- while(buf->mapCount != 0)
- _glthread_COND_WAIT(buf->event, *mutex);
-
- buf->unFenced = 1;
- return 0;
-}
-
-
-struct _DriFreeSlabManager *
-driInitFreeSlabManager(uint32_t checkIntervalMsec, uint32_t slabTimeoutMsec)
-{
- struct _DriFreeSlabManager *tmp;
-
- tmp = calloc(1, sizeof(*tmp));
- if (!tmp)
- return NULL;
-
- _glthread_INIT_MUTEX(tmp->mutex);
- _glthread_LOCK_MUTEX(tmp->mutex);
- tmp->slabTimeout.tv_usec = slabTimeoutMsec*1000;
- tmp->slabTimeout.tv_sec = tmp->slabTimeout.tv_usec / 1000000;
- tmp->slabTimeout.tv_usec -= tmp->slabTimeout.tv_sec*1000000;
-
- tmp->checkInterval.tv_usec = checkIntervalMsec*1000;
- tmp->checkInterval.tv_sec = tmp->checkInterval.tv_usec / 1000000;
- tmp->checkInterval.tv_usec -= tmp->checkInterval.tv_sec*1000000;
-
- gettimeofday(&tmp->nextCheck, NULL);
- driTimeAdd(&tmp->nextCheck, &tmp->checkInterval);
- DRMINITLISTHEAD(&tmp->timeoutList);
- DRMINITLISTHEAD(&tmp->unCached);
- DRMINITLISTHEAD(&tmp->cached);
- _glthread_UNLOCK_MUTEX(tmp->mutex);
-
- return tmp;
-}
-
-void
-driFinishFreeSlabManager(struct _DriFreeSlabManager *fMan)
-{
- struct timeval time;
-
- time = fMan->nextCheck;
- driTimeAdd(&time, &fMan->checkInterval);
-
- _glthread_LOCK_MUTEX(fMan->mutex);
- driFreeTimeoutKBOsLocked(fMan, &time);
- _glthread_UNLOCK_MUTEX(fMan->mutex);
-
- assert(fMan->timeoutList.next == &fMan->timeoutList);
- assert(fMan->unCached.next == &fMan->unCached);
- assert(fMan->cached.next == &fMan->cached);
-
- free(fMan);
-}
-
-static void
-driInitSizeHeader(struct _DriSlabPool *pool, uint32_t size,
- struct _DriSlabSizeHeader *header)
-{
- _glthread_INIT_MUTEX(header->mutex);
- _glthread_LOCK_MUTEX(header->mutex);
-
- DRMINITLISTHEAD(&header->slabs);
- DRMINITLISTHEAD(&header->freeSlabs);
- DRMINITLISTHEAD(&header->delayedBuffers);
-
- header->numDelayed = 0;
- header->slabPool = pool;
- header->bufSize = size;
-
- _glthread_UNLOCK_MUTEX(header->mutex);
-}
-
-static void
-driFinishSizeHeader(struct _DriSlabSizeHeader *header)
-{
- drmMMListHead *list, *next;
- struct _DriSlabBuffer *buf;
-
- _glthread_LOCK_MUTEX(header->mutex);
- for (list = header->delayedBuffers.next, next = list->next;
- list != &header->delayedBuffers;
- list = next, next = list->next) {
-
- buf = DRMLISTENTRY(struct _DriSlabBuffer, list , head);
- if (buf->fence) {
- (void) driFenceFinish(buf->fence, buf->fenceType, 0);
- driFenceUnReference(&buf->fence);
- }
- header->numDelayed--;
- driSlabFreeBufferLocked(buf);
- }
- _glthread_UNLOCK_MUTEX(header->mutex);
-}
-
-static void
-pool_takedown(struct _DriBufferPool *driPool)
-{
- struct _DriSlabPool *pool = driPool->data;
- int i;
-
- for (i=0; i<pool->numBuckets; ++i) {
- driFinishSizeHeader(&pool->headers[i]);
- }
-
- free(pool->headers);
- free(pool->bucketSizes);
- free(pool);
- free(driPool);
-}
-
-struct _DriBufferPool *
-driSlabPoolInit(int fd, uint64_t flags,
- uint64_t validMask,
- uint32_t smallestSize,
- uint32_t numSizes,
- uint32_t desiredNumBuffers,
- uint32_t maxSlabSize,
- uint32_t pageAlignment,
- struct _DriFreeSlabManager *fMan)
-{
- struct _DriBufferPool *driPool;
- struct _DriSlabPool *pool;
- uint32_t i;
-
- driPool = calloc(1, sizeof(*driPool));
- if (!driPool)
- return NULL;
-
- pool = calloc(1, sizeof(*pool));
- if (!pool)
- goto out_err0;
-
- pool->bucketSizes = calloc(numSizes, sizeof(*pool->bucketSizes));
- if (!pool->bucketSizes)
- goto out_err1;
-
- pool->headers = calloc(numSizes, sizeof(*pool->headers));
- if (!pool->headers)
- goto out_err2;
-
- pool->fMan = fMan;
- pool->proposedFlags = flags;
- pool->validMask = validMask;
- pool->numBuckets = numSizes;
- pool->pageSize = getpagesize();
- pool->fd = fd;
- pool->pageAlignment = pageAlignment;
- pool->maxSlabSize = maxSlabSize;
- pool->desiredNumBuffers = desiredNumBuffers;
-
- for (i=0; i<pool->numBuckets; ++i) {
- pool->bucketSizes[i] = (smallestSize << i);
- driInitSizeHeader(pool, pool->bucketSizes[i],
- &pool->headers[i]);
- }
-
- driPool->data = (void *) pool;
- driPool->map = &pool_map;
- driPool->unmap = &pool_unmap;
- driPool->destroy = &pool_destroy;
- driPool->offset = &pool_offset;
- driPool->poolOffset = &pool_poolOffset;
- driPool->flags = &pool_flags;
- driPool->size = &pool_size;
- driPool->create = &pool_create;
- driPool->fence = &pool_fence;
- driPool->kernel = &pool_kernel;
- driPool->validate = &pool_validate;
- driPool->waitIdle = &pool_waitIdle;
- driPool->takeDown = &pool_takedown;
-
- return driPool;
-
- out_err2:
- free(pool->bucketSizes);
- out_err1:
- free(pool);
- out_err0:
- free(driPool);
-
- return NULL;
-}
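
/*
 * Bucket selection example (illustrative values): with smallestSize = 4096
 * and numSizes = 5, driSlabPoolInit creates buckets of 4096, 8192, 16384,
 * 32768 and 65536 bytes. A 10000-byte pool_create request is served from
 * the 16384-byte bucket; a 100000-byte request exceeds every bucket and
 * falls back to a direct drmBOCreate wrapped in a non-slab _DriSlabBuffer.
 */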