return buf->bufmgr->bo_unmap(buf);
}
-void
-dri_fence_wait(dri_fence *fence)
-{
- fence->bufmgr->fence_wait(fence);
-}
-
-void
-dri_fence_reference(dri_fence *fence)
-{
- fence->bufmgr->fence_reference(fence);
-}
-
-void
-dri_fence_unreference(dri_fence *fence)
-{
- if (fence == NULL)
- return;
-
- fence->bufmgr->fence_unreference(fence);
-}
-
void
dri_bo_subdata(dri_bo *bo, unsigned long offset,
unsigned long size, const void *data)
return batch_buf->bufmgr->process_relocs(batch_buf);
}
-void dri_post_submit(dri_bo *batch_buf, dri_fence **last_fence)
+void dri_post_submit(dri_bo *batch_buf)
{
- batch_buf->bufmgr->post_submit(batch_buf, last_fence);
+ batch_buf->bufmgr->post_submit(batch_buf);
}
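
With the fence objects gone, a submission is now three calls into the buffer manager: resolve relocations, hand the batch to the kernel, and let the manager do its post-submit bookkeeping. A minimal sketch of the new flow, where submit_to_kernel() is a hypothetical stand-in for the driver-specific execution ioctl:

/* Sketch of the fence-free submission path; submit_to_kernel() is a
 * hypothetical stand-in for the driver's execution ioctl (such as
 * intel_exec_ioctl below), and error handling is omitted. */
static void
submit_batch(dri_bo *batch_buf)
{
   void *priv = dri_process_relocs(batch_buf); /* backend-private exec data */

   submit_to_kernel(priv);      /* the kernel tracks completion itself now */
   dri_post_submit(batch_buf);  /* no dri_fence comes back anymore */
}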
void
typedef struct _dri_bufmgr dri_bufmgr;
typedef struct _dri_bo dri_bo;
-typedef struct _dri_fence dri_fence;
struct _dri_bo {
/**
dri_bufmgr *bufmgr;
};
-struct _dri_fence {
- /**
- * This is an ORed mask of DRM_BO_FLAG_READ, DRM_BO_FLAG_WRITE, and
- * DRM_BO_FLAG_EXE indicating the operations associated with this fence.
- *
- * It is constant for the life of the fence object.
- */
- unsigned int type;
- /** Buffer manager context associated with this fence */
- dri_bufmgr *bufmgr;
-};
-
/**
* Context for a buffer manager instance.
*
/**
* Maps the buffer into userspace.
*
- * This function will block waiting for any existing fence on the buffer to
- * clear, first. The resulting mapping is available at buf->virtual.
- */
+ * This function will block waiting for any existing execution on the
+ * buffer to complete, first. The resulting mapping is available at
+ * buf->virtual.
+ */
int (*bo_map)(dri_bo *buf, GLboolean write_enable);
/** Reduces the refcount on the userspace mapping of the buffer object. */
int (*bo_unmap)(dri_bo *buf);
- /** Takes a reference on a fence object */
- void (*fence_reference)(dri_fence *fence);
-
- /**
- * Releases a reference on a fence object, freeing the data if
- * no references remain.
- */
- void (*fence_unreference)(dri_fence *fence);
-
- /**
- * Blocks until the given fence is signaled.
- */
- void (*fence_wait)(dri_fence *fence);
-
/**
* Tears down the buffer manager instance.
*/
*/
void *(*process_relocs)(dri_bo *batch_buf);
- void (*post_submit)(dri_bo *batch_buf, dri_fence **fence);
+ void (*post_submit)(dri_bo *batch_buf);
int (*check_aperture_space)(dri_bo *bo);
GLboolean debug; /**< Enables verbose debugging printouts */
void dri_bo_unreference(dri_bo *bo);
int dri_bo_map(dri_bo *buf, GLboolean write_enable);
int dri_bo_unmap(dri_bo *buf);
-void dri_fence_wait(dri_fence *fence);
-void dri_fence_reference(dri_fence *fence);
-void dri_fence_unreference(dri_fence *fence);
void dri_bo_subdata(dri_bo *bo, unsigned long offset,
unsigned long size, const void *data);
GLuint offset, dri_bo *target_buf);
void *dri_process_relocs(dri_bo *batch_buf);
void dri_post_process_relocs(dri_bo *batch_buf);
-void dri_post_submit(dri_bo *batch_buf, dri_fence **last_fence);
+void dri_post_submit(dri_bo *batch_buf);
int dri_bufmgr_check_aperture_space(dri_bo *bo);
#endif
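
Since bo_map is documented above to block until pending execution on the buffer completes, a map/unmap pair is also the synchronization point for CPU access. A minimal sketch of the write idiom through the public wrappers (helper name and payload are illustrative; memcpy needs <string.h>):

/* Sketch: CPU upload through the map interface. dri_bo_map() blocks
 * until any pending execution on the buffer finishes, after which the
 * mapping is visible at buf->virtual. */
static int
upload_data(dri_bo *buf, const void *data, unsigned long size)
{
   int ret = dri_bo_map(buf, GL_TRUE);   /* write_enable = GL_TRUE */

   if (ret != 0)
      return ret;
   memcpy(buf->virtual, data, size);
   return dri_bo_unmap(buf);
}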
void *invalidate_ptr;
} dri_bo_fake;
-typedef struct _dri_fence_fake {
- dri_fence fence;
-
- const char *name;
- unsigned int refcount;
- unsigned int fence_cookie;
- GLboolean flushed;
-} dri_fence_fake;
-
static int clear_fenced(dri_bufmgr_fake *bufmgr_fake,
unsigned int fence_cookie);
return 0;
}
-static dri_fence *
-dri_fake_fence_validated(dri_bufmgr *bufmgr, const char *name,
- GLboolean flushed)
+static void
+dri_fake_fence_validated(dri_bufmgr *bufmgr)
{
- dri_fence_fake *fence_fake;
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
unsigned int cookie;
- fence_fake = malloc(sizeof(*fence_fake));
- if (!fence_fake)
- return NULL;
-
- fence_fake->refcount = 1;
- fence_fake->name = name;
- fence_fake->flushed = flushed;
- fence_fake->fence.bufmgr = bufmgr;
-
cookie = _fence_emit_internal(bufmgr_fake);
- fence_fake->fence_cookie = cookie;
fence_blocks(bufmgr_fake, cookie);
- DBG("drm_fence_validated: 0x%08x cookie\n", fence_fake->fence_cookie);
-
- return &fence_fake->fence;
-}
-
-static void
-dri_fake_fence_reference(dri_fence *fence)
-{
- dri_fence_fake *fence_fake = (dri_fence_fake *)fence;
-
- ++fence_fake->refcount;
-}
-
-static void
-dri_fake_fence_unreference(dri_fence *fence)
-{
- dri_fence_fake *fence_fake = (dri_fence_fake *)fence;
-
- if (!fence)
- return;
-
- if (--fence_fake->refcount == 0) {
- free(fence);
- return;
- }
-}
-
-static void
-dri_fake_fence_wait(dri_fence *fence)
-{
- dri_fence_fake *fence_fake = (dri_fence_fake *)fence;
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)fence->bufmgr;
-
- DBG("drm_fence_wait: 0x%08x cookie\n", fence_fake->fence_cookie);
-
- _fence_wait_internal(bufmgr_fake, fence_fake->fence_cookie);
+ DBG("drm_fence_validated: 0x%08x cookie\n", cookie);
}
static void
static void
-dri_fake_post_submit(dri_bo *batch_buf, dri_fence **last_fence)
+dri_fake_post_submit(dri_bo *batch_buf)
{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)batch_buf->bufmgr;
- dri_fence *fo;
-
- fo = dri_fake_fence_validated(batch_buf->bufmgr, "Batch fence", GL_TRUE);
-
- if (bufmgr_fake->performed_rendering) {
- dri_fence_unreference(*last_fence);
- *last_fence = fo;
- } else {
- dri_fence_unreference(fo);
- }
+ dri_fake_fence_validated(batch_buf->bufmgr);
dri_bo_fake_post_submit(batch_buf);
}
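
The fake backend thus keeps fencing entirely internal: a submission emits a cookie that tags the blocks it touched, and the wait happens later, inside the backend, when a still-fenced block has to be recycled. A condensed sketch of that lifecycle using the internal helpers referenced above (the two halves really run at different times):

static void
fake_cookie_lifecycle(dri_bufmgr_fake *bufmgr_fake)
{
   unsigned int cookie;

   /* At submit time (see dri_fake_fence_validated above): */
   cookie = _fence_emit_internal(bufmgr_fake);
   fence_blocks(bufmgr_fake, cookie);

   /* Later, when a fenced block must be reused: block until the
    * cookie retires, then sweep the blocks it covered. */
   _fence_wait_internal(bufmgr_fake, cookie);
   clear_fenced(bufmgr_fake, cookie);
}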
bufmgr_fake->bufmgr.bo_unreference = dri_fake_bo_unreference;
bufmgr_fake->bufmgr.bo_map = dri_fake_bo_map;
bufmgr_fake->bufmgr.bo_unmap = dri_fake_bo_unmap;
- bufmgr_fake->bufmgr.fence_wait = dri_fake_fence_wait;
- bufmgr_fake->bufmgr.fence_reference = dri_fake_fence_reference;
- bufmgr_fake->bufmgr.fence_unreference = dri_fake_fence_unreference;
bufmgr_fake->bufmgr.destroy = dri_fake_destroy;
bufmgr_fake->bufmgr.emit_reloc = dri_fake_emit_reloc;
bufmgr_fake->bufmgr.process_relocs = dri_fake_process_relocs;
intel_tris.c \
intel_fbo.c \
intel_depthstencil.c \
- intel_bufmgr_ttm.c
+ intel_bufmgr_gem.c
C_SOURCES = \
$(COMMON_SOURCES) \
struct intel_batchbuffer *batch = calloc(sizeof(*batch), 1);
batch->intel = intel;
- batch->last_fence = NULL;
intel_batchbuffer_reset(batch);
return batch;
void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
- if (batch->last_fence) {
- dri_fence_wait(batch->last_fence);
- dri_fence_unreference(batch->last_fence);
- batch->last_fence = NULL;
- }
if (batch->map) {
dri_bo_unmap(batch->buf);
batch->map = NULL;
used,
batch->cliprect_mode != LOOP_CLIPRECTS,
allow_unlock,
- execbuf, &batch->last_fence);
+ execbuf);
} else {
dri_process_relocs(batch->buf);
intel_batch_ioctl(batch->intel,
allow_unlock);
}
}
-
- dri_post_submit(batch->buf, &batch->last_fence);
+
+ dri_post_submit(batch->buf);
if (intel->numClipRects == 0 &&
batch->cliprect_mode == LOOP_CLIPRECTS) {
UNLOCK_HARDWARE(intel);
if (INTEL_DEBUG & DEBUG_SYNC) {
+ int irq;
+
fprintf(stderr, "waiting for idle\n");
- if (batch->last_fence != NULL)
- dri_fence_wait(batch->last_fence);
+ LOCK_HARDWARE(intel);
+ irq = intelEmitIrqLocked(intel);
+ UNLOCK_HARDWARE(intel);
+ intelWaitIrq(intel, irq);
}
/* Reset the buffer:
intel_batchbuffer_reset(batch);
}
-void
-intel_batchbuffer_finish(struct intel_batchbuffer *batch)
-{
- intel_batchbuffer_flush(batch);
- if (batch->last_fence != NULL)
- dri_fence_wait(batch->last_fence);
-}
-
/* This is the only way buffers get added to the validate list.
*/
struct intel_context *intel;
dri_bo *buf;
- dri_fence *last_fence;
GLubyte *map;
GLubyte *ptr;
void intel_batchbuffer_free(struct intel_batchbuffer *batch);
-void intel_batchbuffer_finish(struct intel_batchbuffer *batch);
-
void _intel_batchbuffer_flush(struct intel_batchbuffer *batch,
const char *file, int line);
intelScreen = intel->intelScreen;
- if (intel->last_swap_fence) {
- dri_fence_wait(intel->last_swap_fence);
- dri_fence_unreference(intel->last_swap_fence);
- intel->last_swap_fence = NULL;
- }
- intel->last_swap_fence = intel->first_swap_fence;
- intel->first_swap_fence = NULL;
-
/* The LOCK_HARDWARE is required for the cliprects. Buffer offsets
* should work regardless.
*/
ADVANCE_BATCH();
}
- if (intel->first_swap_fence)
- dri_fence_unreference(intel->first_swap_fence);
intel_batchbuffer_flush(intel->batch);
- intel->first_swap_fence = intel->batch->last_fence;
- if (intel->first_swap_fence)
- dri_fence_reference(intel->first_swap_fence);
}
UNLOCK_HARDWARE(intel);
void *virtual;
} dri_bo_gem;
-typedef struct _dri_fence_gem
-{
- dri_fence fence;
-
- int refcount;
- const char *name;
- drmFence drm_fence;
-} dri_fence_gem;
-
static int
logbase2(int n)
{
return 0;
}
-static void
-dri_gem_fence_reference(dri_fence *fence)
-{
- dri_fence_gem *fence_gem = (dri_fence_gem *)fence;
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)fence->bufmgr;
-
- ++fence_gem->refcount;
- DBG("fence_reference: %p (%s)\n", &fence_gem->fence, fence_gem->name);
-}
-
-static void
-dri_gem_fence_unreference(dri_fence *fence)
-{
- dri_fence_gem *fence_gem = (dri_fence_gem *)fence;
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)fence->bufmgr;
-
- if (!fence)
- return;
-
- DBG("fence_unreference: %p (%s)\n", &fence_gem->fence, fence_gem->name);
-
- if (--fence_gem->refcount == 0) {
- int ret;
-
- ret = drmFenceUnreference(bufmgr_gem->fd, &fence_gem->drm_fence);
- if (ret != 0) {
- fprintf(stderr, "drmFenceUnreference failed (%s): %s\n",
- fence_gem->name, strerror(-ret));
- }
-
- free(fence);
- return;
- }
-}
-
-static void
-dri_gem_fence_wait(dri_fence *fence)
-{
- dri_fence_gem *fence_gem = (dri_fence_gem *)fence;
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)fence->bufmgr;
- int ret;
-
- ret = drmFenceWait(bufmgr_gem->fd, DRM_FENCE_FLAG_WAIT_LAZY, &fence_gem->drm_fence, 0);
- if (ret != 0) {
- fprintf(stderr, "%s:%d: Error waiting for fence %s: %s.\n",
- __FILE__, __LINE__, fence_gem->name, strerror(-ret));
- abort();
- }
-
- DBG("fence_wait: %p (%s)\n", &fence_gem->fence, fence_gem->name);
-}
-
static void
dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr)
{
}
static void
-dri_gem_post_submit(dri_bo *batch_buf, dri_fence **last_fence)
+dri_gem_post_submit(dri_bo *batch_buf)
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)batch_buf->bufmgr;
int i;
 * and manage map buffer objects.
*
* \param fd File descriptor of the opened DRM device.
- * \param fence_type Driver-specific fence type used for fences with no flush.
- * \param fence_type_flush Driver-specific fence type used for fences with a
- * flush.
*/
dri_bufmgr *
intel_bufmgr_gem_init(int fd, int batch_size)
bufmgr_gem->bufmgr.bo_unreference = dri_gem_bo_unreference;
bufmgr_gem->bufmgr.bo_map = dri_gem_bo_map;
bufmgr_gem->bufmgr.bo_unmap = dri_gem_bo_unmap;
- bufmgr_gem->bufmgr.fence_reference = dri_gem_fence_reference;
- bufmgr_gem->bufmgr.fence_unreference = dri_gem_fence_unreference;
- bufmgr_gem->bufmgr.fence_wait = dri_gem_fence_wait;
bufmgr_gem->bufmgr.destroy = dri_bufmgr_gem_destroy;
bufmgr_gem->bufmgr.emit_reloc = dri_gem_emit_reloc;
bufmgr_gem->bufmgr.process_relocs = dri_gem_process_reloc;
#include "dri_bufmgr.h"
-extern dri_bo *intel_gem_bo_create_from_handle(dri_bufmgr *bufmgr, const char *name,
+extern dri_bo *intel_gem_bo_create_from_handle(dri_bufmgr *bufmgr,
+ const char *name,
unsigned int handle);
-dri_fence *intel_gem_fence_create_from_arg(dri_bufmgr *bufmgr, const char *name,
- drm_fence_arg_t *arg);
-
-
dri_bufmgr *intel_bufmgr_gem_init(int fd, int batch_size);
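
For reference, the header amounts to a two-step bring-up: create a buffer manager for the DRM file descriptor, then wrap any objects the DDX shared by handle. A hypothetical sketch (the fd, the 4096-byte batch size, and bo_handle are placeholders):

/* Sketch: GEM bufmgr bring-up plus import of a DDX-shared object. */
static dri_bo *
gem_bring_up(int fd, unsigned int bo_handle)
{
   dri_bufmgr *bufmgr = intel_bufmgr_gem_init(fd, 4096);

   if (bufmgr == NULL)
      return NULL;  /* caller falls back to the classic buffer manager */

   return intel_gem_bo_create_from_handle(bufmgr, "front buffer", bo_handle);
}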
void
#include "intel_buffer_objects.h"
#include "intel_fbo.h"
#include "intel_decode.h"
-#include "intel_bufmgr_ttm.h"
+#include "intel_bufmgr_gem.h"
#include "drirenderbuffer.h"
#include "vblank.h"
void
intelFinish(GLcontext * ctx)
{
- struct intel_context *intel = intel_context(ctx);
+ struct gl_framebuffer *fb = ctx->DrawBuffer;
+ int i;
+
intelFlush(ctx);
- if (intel->batch->last_fence) {
- dri_fence_wait(intel->batch->last_fence);
- dri_fence_unreference(intel->batch->last_fence);
- intel->batch->last_fence = NULL;
+
+ for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
+ /* XXX: Wait on buffer idle */
+ }
+ if (fb->_DepthBuffer) {
+ /* XXX: Wait on buffer idle */
}
}
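
The XXX markers leave the per-buffer idle wait unimplemented for now. One way to express it with the remaining API, relying on the documented blocking behaviour of bo_map, would be a helper along these lines (hypothetical, not part of this patch):

/* Hypothetical idle wait: dri_bo_map() blocks until pending execution
 * on the buffer completes, so mapping and immediately unmapping
 * doubles as a wait-for-idle. */
static void
intel_bo_wait_idle(dri_bo *bo)
{
   if (bo == NULL)
      return;
   dri_bo_map(bo, GL_FALSE);
   dri_bo_unmap(bo);
}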
intel_init_bufmgr(struct intel_context *intel)
{
intelScreenPrivate *intelScreen = intel->intelScreen;
- GLboolean ttm_disable = getenv("INTEL_NO_TTM") != NULL;
- GLboolean ttm_supported;
+ GLboolean gem_disable = getenv("INTEL_NO_GEM") != NULL;
+ GLboolean gem_supported;
- /* If we've got a new enough DDX that's initializing TTM and giving us
+ /* If we've got a new enough DDX that's initializing GEM and giving us
* object handles for the shared buffers, use that.
*/
intel->ttm = GL_FALSE;
if (intel->intelScreen->driScrnPriv->dri2.enabled)
- ttm_supported = GL_TRUE;
+ gem_supported = GL_TRUE;
else if (intel->intelScreen->driScrnPriv->ddx_version.minor >= 9 &&
intel->intelScreen->drmMinor >= 11 &&
intel->intelScreen->front.bo_handle != -1)
- ttm_supported = GL_TRUE;
+ gem_supported = GL_TRUE;
else
- ttm_supported = GL_FALSE;
+ gem_supported = GL_FALSE;
- if (!ttm_disable && ttm_supported) {
+ if (!gem_disable && gem_supported) {
int bo_reuse_mode;
- intel->bufmgr = intel_bufmgr_ttm_init(intel->driFd,
- DRM_FENCE_TYPE_EXE,
- DRM_FENCE_TYPE_EXE |
- DRM_I915_FENCE_TYPE_RW,
+ intel->bufmgr = intel_bufmgr_gem_init(intel->driFd,
BATCH_SZ);
if (intel->bufmgr != NULL)
intel->ttm = GL_TRUE;
case DRI_CONF_BO_REUSE_DISABLED:
break;
case DRI_CONF_BO_REUSE_ALL:
- intel_ttm_enable_bo_reuse(intel->bufmgr);
+ intel_gem_enable_bo_reuse(intel->bufmgr);
break;
}
}
/* Otherwise, use the classic buffer manager. */
if (intel->bufmgr == NULL) {
- if (ttm_disable) {
- fprintf(stderr, "TTM buffer manager disabled. Using classic.\n");
+ if (gem_disable) {
+ fprintf(stderr, "GEM disabled. Using classic.\n");
} else {
- fprintf(stderr, "Failed to initialize TTM buffer manager. "
+ fprintf(stderr, "Failed to initialize GEM. "
"Falling back to classic.\n");
}
intel_recreate_static_regions(intel);
intel->batch = intel_batchbuffer_alloc(intel);
- intel->last_swap_fence = NULL;
- intel->first_swap_fence = NULL;
intel_bufferobj_init(intel);
intel_fbo_init(intel);
intel_batchbuffer_free(intel->batch);
- if (intel->last_swap_fence) {
- dri_fence_wait(intel->last_swap_fence);
- dri_fence_unreference(intel->last_swap_fence);
- intel->last_swap_fence = NULL;
- }
- if (intel->first_swap_fence) {
- dri_fence_wait(intel->first_swap_fence);
- dri_fence_unreference(intel->first_swap_fence);
- intel->first_swap_fence = NULL;
- }
-
if (release_texture_heaps) {
/* This share group is about to go away, free our private
* texture object data.
*/
GLboolean ttm;
- dri_fence *last_swap_fence;
- dri_fence *first_swap_fence;
-
struct intel_batchbuffer *batch;
GLboolean no_batch_wrap;
unsigned batch_id;
#include "drm.h"
#include "i915_drm.h"
-#include "intel_bufmgr_ttm.h"
+#include "intel_bufmgr_gem.h"
#define FILE_DEBUG_FLAG DEBUG_IOCTL
intel_exec_ioctl(struct intel_context *intel,
GLuint used,
GLboolean ignore_cliprects, GLboolean allow_unlock,
- struct drm_i915_gem_execbuffer *execbuf,
- dri_fence **fence)
+ struct drm_i915_gem_execbuffer *execbuf)
{
- dri_fence *fo;
int ret;
assert(intel->locked);
if (intel->no_hw)
return;
- if (*fence) {
- dri_fence_unreference(*fence);
- }
-
   memset(execbuf, 0, sizeof(*execbuf));
execbuf->batch_start_offset = 0;
UNLOCK_HARDWARE(intel);
exit(1);
}
-
- fo = intel_ttm_fence_create_from_arg(intel->bufmgr, "fence buffers",
-                                        &execbuf->fence_arg);
- if (!fo) {
-      fprintf(stderr, "failed to fence handle: %08x\n", execbuf->fence_arg.handle);
- UNLOCK_HARDWARE(intel);
- exit(1);
- }
- *fence = fo;
}
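
On the caller side (the GEM branch of intel_batchbuffer_flush above), the execbuffer structure is produced by the buffer manager itself: dri_process_relocs() returns it and it is passed through unchanged. Condensed for illustration, with batch, used, and allow_unlock as in that function:

/* Condensed from the GEM path of intel_batchbuffer_flush(). */
struct drm_i915_gem_execbuffer *execbuf;

execbuf = dri_process_relocs(batch->buf);
intel_exec_ioctl(batch->intel, used,
                 batch->cliprect_mode != LOOP_CLIPRECTS,
                 allow_unlock, execbuf);
dri_post_submit(batch->buf);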
void intel_exec_ioctl(struct intel_context *intel,
GLuint used,
GLboolean ignore_cliprects, GLboolean allow_unlock,
- struct drm_i915_gem_execbuffer *execbuf,
- dri_fence **fence);
+ struct drm_i915_gem_execbuffer *execbuf);
#endif
#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "dri_bufmgr.h"
-#include "intel_bufmgr_ttm.h"
+#include "intel_bufmgr_gem.h"
#include "intel_batchbuffer.h"
#define FILE_DEBUG_FLAG DEBUG_REGION
{
dri_bo *buffer;
- buffer = intel_ttm_bo_create_from_handle(intel->bufmgr, "region", handle);
+ buffer = intel_gem_bo_create_from_handle(intel->bufmgr, "region", handle);
return intel_region_alloc_internal(intel,
cpp, pitch, height, tiled, buffer);
if (intel->ttm) {
assert(region_desc->bo_handle != -1);
- region->buffer = intel_ttm_bo_create_from_handle(intel->bufmgr,
+ region->buffer = intel_gem_bo_create_from_handle(intel->bufmgr,
name,
region_desc->bo_handle);
} else {
#include "i830_dri.h"
#include "intel_regions.h"
#include "intel_batchbuffer.h"
-#include "intel_bufmgr_ttm.h"
+#include "intel_bufmgr_gem.h"
PUBLIC const char __driConfigOptions[] =
DRI_CONF_BEGIN